From f65ac577bc0dccf0aa75dc37ddf3b57f999d9fa2 Mon Sep 17 00:00:00 2001
From: Kbz-8
Date: Mon, 2 Sep 2024 09:44:42 +0200
Subject: [PATCH] big refactoring ! ci skip

---
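Note: the headline API change in this patch is that the public Application
entry points stop taking raw void* for windows and textures and use an opaque
Handle alias instead (see the Application.h hunks below). A minimal sketch of
the alias those new signatures assume (hypothetical here; the real definition
lands in a header outside this excerpt):

    // Sketch only, not the actual definition from this patch: an opaque,
    // pointer-sized handle alias compatible with the new signatures
    // (MouseMove(Handle, int, int), Handle p_param = nullptr, ...).
    namespace mlx
    {
        using Handle = void*;
    }

Keeping the alias pointer-sized would preserve the library's C ABI while
making the intent of each parameter clearer than a bare void*.
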
 Makefile | 19 +-
 runtime/Includes/Core/Application.h | 46 +-
 runtime/Includes/Core/Application.inl | 13 +-
 runtime/Includes/Core/Enums.h | 24 +-
 runtime/Includes/Core/EventBase.h | 12 -
 runtime/Includes/Core/EventBus.h | 12 -
 runtime/Includes/Core/EventListener.h | 12 -
 runtime/Includes/Core/Format.h | 12 -
 runtime/Includes/Core/Format.inl | 13 +-
 runtime/Includes/Core/Fps.h | 12 -
 runtime/Includes/Core/Graphics.h | 12 -
 runtime/Includes/Core/Graphics.inl | 13 +-
 runtime/Includes/Core/ImagesRegistry.h | 12 -
 runtime/Includes/Core/ImagesRegistry.inl | 5 -
 runtime/Includes/Core/Logs.h | 12 -
 runtime/Includes/Core/Logs.inl | 13 +-
 runtime/Includes/Core/Memory.h | 12 -
 runtime/Includes/Core/Profiler.h | 12 -
 runtime/Includes/Core/SDLManager.h | 32 +-
 runtime/Includes/Core/UUID.h | 12 -
 runtime/Includes/Embedded/2DFragment.nzsl | 28 +
 runtime/Includes/Embedded/2DFragment.spv.h | 44 +
 runtime/Includes/Embedded/2DVertex.nzsl | 45 +
 runtime/Includes/Embedded/2DVertex.spv.h | 80 +
 .../Includes/{Utils => Embedded}/DogicaTTF.h | 12 -
 .../Includes/{Utils => Embedded}/IconMlx.h | 12 -
 runtime/Includes/Embedded/ScreenFragment.nzsl | 46 +
 .../Includes/Embedded/ScreenFragment.spv.h | 49 +
 runtime/Includes/Embedded/ScreenVertex.nzsl | 31 +
 runtime/Includes/Embedded/ScreenVertex.spv.h | 48 +
 runtime/Includes/Graphics/Mesh.h | 53 +
 runtime/Includes/Graphics/Scene.h | 32 +
 runtime/Includes/Graphics/Sprite.h | 53 +
 runtime/Includes/Maths/Angles.h | 108 +
 runtime/Includes/Maths/Angles.inl | 488 +
 runtime/Includes/Maths/Constants.h | 87 +
 runtime/Includes/Maths/Enums.h | 20 +
 runtime/Includes/Maths/EulerAngles.h | 57 +
 runtime/Includes/Maths/EulerAngles.inl | 169 +
 runtime/Includes/Maths/Mat4.h | 122 +
 runtime/Includes/Maths/Mat4.inl | 879 +
 runtime/Includes/Maths/MathsUtils.h | 26 +
 runtime/Includes/Maths/MathsUtils.inl | 47 +
 runtime/Includes/Maths/Quaternions.h | 91 +
 runtime/Includes/Maths/Quaternions.inl | 508 +
 runtime/Includes/Maths/Vec2.h | 116 +
 runtime/Includes/Maths/Vec2.inl | 388 +
 runtime/Includes/Maths/Vec3.h | 133 +
 runtime/Includes/Maths/Vec3.inl | 509 +
 runtime/Includes/Maths/Vec4.h | 115 +
 runtime/Includes/Maths/Vec4.inl | 424 +
 runtime/Includes/Platform/Inputs.h | 13 +-
 runtime/Includes/Platform/Window.h | 24 +-
 runtime/Includes/PreCompiled.h | 22 +-
 runtime/Includes/Renderer/Buffer.h | 83 +
 runtime/Includes/Renderer/Buffers/Buffer.h | 66 -
 .../Includes/Renderer/Buffers/IndexBuffer.h | 29 -
 .../Includes/Renderer/Buffers/UniformBuffer.h | 50 -
 .../Includes/Renderer/Buffers/VertexBuffer.h | 46 -
 .../Includes/Renderer/Command/CommandBuffer.h | 70 -
 .../Renderer/Command/CommandManager.h | 43 -
 .../Includes/Renderer/Command/CommandPool.h | 36 -
 .../Renderer/Command/CommandResource.h | 38 -
 .../Renderer/Command/SingleTimeCmdManager.h | 49 -
 runtime/Includes/Renderer/Core/Device.h | 40 -
 .../Includes/Renderer/Core/DrawableResource.h | 28 -
 runtime/Includes/Renderer/Core/Fence.h | 40 -
 runtime/Includes/Renderer/Core/Instance.h | 38 -
 runtime/Includes/Renderer/Core/Queues.h | 51 -
 runtime/Includes/Renderer/Core/RenderCore.h | 78 -
 runtime/Includes/Renderer/Core/Semaphore.h | 31 -
 runtime/Includes/Renderer/Core/Surface.h | 34 -
 .../Includes/Renderer/Core/ValidationLayers.h | 44 -
 runtime/Includes/Renderer/Descriptor.h | 48 +
 .../Renderer/Descriptors/DescriptorPool.h | 42 -
 .../Descriptors/DescriptorPoolManager.h | 35 -
 .../Renderer/Descriptors/DescriptorSet.h | 56 -
 .../Descriptors/DescriptorSetLayout.h | 38 -
 runtime/Includes/Renderer/Enums.h | 41 +-
 runtime/Includes/Renderer/Image.h | 102 +
 runtime/Includes/Renderer/Images/Image.h | 84 -
 runtime/Includes/Renderer/Images/Texture.h | 63 -
 .../Includes/Renderer/Images/TextureAtlas.h | 43 -
 .../Renderer/Images/TextureDescriptor.h | 59 -
 .../Renderer/Images/TextureRegistry.h | 39 -
 .../Renderer/Images/TextureRegistry.inl | 52 -
 runtime/Includes/Renderer/{Core => }/Memory.h | 18 +-
 .../Includes/Renderer/Pipelines/Graphics.h | 57 +
 .../Includes/Renderer/Pipelines/Pipeline.h | 35 +-
 runtime/Includes/Renderer/Pipelines/Shader.h | 68 +
 runtime/Includes/Renderer/PixelPut.h | 48 -
 runtime/Includes/Renderer/RenderCore.h | 39 +
 .../Includes/Renderer/RenderPasses/2DPass.h | 29 +
 .../Renderer/RenderPasses/FinalPass.h | 27 +
 .../Includes/Renderer/RenderPasses/Passes.h | 26 +
 runtime/Includes/Renderer/Renderer.h | 101 +-
 .../Renderer/Renderpass/FrameBuffer.h | 36 -
 .../Includes/Renderer/Renderpass/RenderPass.h | 36 -
 .../Includes/Renderer/Renderpass/Swapchain.h | 65 -
 runtime/Includes/Renderer/ScenesRenderer.h | 22 +
 runtime/Includes/Renderer/Texts/Font.h | 55 -
 runtime/Includes/Renderer/Texts/FontLibrary.h | 47 -
 runtime/Includes/Renderer/Texts/Text.h | 49 -
 .../Includes/Renderer/Texts/TextDescriptor.h | 62 -
 runtime/Includes/Renderer/Texts/TextLibrary.h | 48 -
 runtime/Includes/Renderer/Texts/TextManager.h | 43 -
 runtime/Includes/Renderer/Vertex.h | 61 +-
 runtime/Includes/Renderer/Vertex.inl | 36 +
 runtime/Includes/Renderer/ViewerData.h | 14 +
 .../Renderer/Vulkan/VulkanPrototypes.h | 170 +
 runtime/Includes/Utils/Ansi.h | 12 -
 runtime/Includes/Utils/Buffer.h | 41 +
 runtime/Includes/Utils/CombineHash.h | 12 -
 runtime/Includes/Utils/ConstMap.h | 12 -
 runtime/Includes/Utils/NonCopyable.h | 12 -
 runtime/Includes/Utils/NonOwningPtr.h | 12 -
 runtime/Includes/Utils/NonOwningPtr.inl | 13 +-
 runtime/Includes/Utils/Singleton.h | 12 -
 runtime/Sources/Core/Application.cpp | 12 -
 runtime/Sources/Core/Bridge.cpp | 12 -
 runtime/Sources/Core/EventBus.cpp | 12 -
 runtime/Sources/Core/EventListener.cpp | 12 -
 runtime/Sources/Core/Fps.cpp | 12 -
 runtime/Sources/Core/Graphics.cpp | 12 -
 runtime/Sources/Core/Logs.cpp | 12 -
 runtime/Sources/Core/Memory.cpp | 12 -
 runtime/Sources/Core/Profiler.cpp | 12 -
 runtime/Sources/Core/SDLManager.cpp | 106 +-
 runtime/Sources/Core/UUID.cpp | 14 +-
 runtime/Sources/Graphics/Mesh.cpp | 31 +
 runtime/Sources/Graphics/Scene.cpp | 19 +
 runtime/Sources/Graphics/Sprite.cpp | 44 +
 runtime/Sources/Platform/Inputs.cpp | 18 +-
 runtime/Sources/Platform/Window.cpp | 16 +-
 runtime/Sources/Renderer/Buffer.cpp | 175 +
 runtime/Sources/Renderer/Buffers/Buffer.cpp | 150 -
 .../Renderer/Buffers/UniformBuffer.cpp | 78 -
 .../Sources/Renderer/Buffers/VertexBuffer.cpp | 56 -
 .../Renderer/Command/CommandBuffer.cpp | 365 -
 .../Renderer/Command/CommandManager.cpp | 42 -
 .../Sources/Renderer/Command/CommandPool.cpp | 37 -
 .../Command/SingleTimeCommandManager.cpp | 64 -
 runtime/Sources/Renderer/Core/Device.cpp | 142 -
 runtime/Sources/Renderer/Core/Fence.cpp | 54 -
 runtime/Sources/Renderer/Core/Instance.cpp | 88 -
 runtime/Sources/Renderer/Core/Memory.cpp | 199 -
 runtime/Sources/Renderer/Core/Queues.cpp | 53 -
 runtime/Sources/Renderer/Core/RenderCore.cpp | 134 -
 runtime/Sources/Renderer/Core/Semaphore.cpp | 36 -
 runtime/Sources/Renderer/Core/Surface.cpp | 43 -
 .../Renderer/Core/ValidationLayers.cpp | 122 -
 runtime/Sources/Renderer/Descriptor.cpp | 141 +
 .../Renderer/Descriptors/DescriptorPool.cpp | 69 -
 .../Descriptors/DescriptorPoolManager.cpp | 41 -
 .../Renderer/Descriptors/DescriptorSet.cpp | 116 -
 .../Descriptors/DescriptorSetLayout.cpp | 49 -
 runtime/Sources/Renderer/Image.cpp | 113 +
 runtime/Sources/Renderer/Images/Image.cpp | 393 -
 runtime/Sources/Renderer/Images/Texture.cpp | 190 -
 .../Sources/Renderer/Images/TextureAtlas.cpp | 58 -
 runtime/Sources/Renderer/Memory.cpp | 159 +
 .../Sources/Renderer/Pipelines/Graphics.cpp | 164 +
 .../Sources/Renderer/Pipelines/Pipeline.cpp | 331 -
 runtime/Sources/Renderer/Pipelines/Shader.cpp | 83 +
 runtime/Sources/Renderer/PixelPut.cpp | 67 -
 runtime/Sources/Renderer/RenderCore.cpp | 78 +
 .../Sources/Renderer/RenderPasses/2DPass.cpp | 112 +
 .../Renderer/RenderPasses/FinalPass.cpp | 76 +
 .../Sources/Renderer/RenderPasses/Passes.cpp | 45 +
 runtime/Sources/Renderer/Renderer.cpp | 260 +-
 .../Renderer/Renderpass/Framebuffer.cpp | 49 -
 .../Renderer/Renderpass/Renderpass.cpp | 117 -
 .../Sources/Renderer/Renderpass/Swapchain.cpp | 150 -
 runtime/Sources/Renderer/SceneRenderer.cpp | 23 +
 runtime/Sources/Renderer/Texts/Font.cpp | 88 -
 .../Sources/Renderer/Texts/FontLibrary.cpp | 68 -
 runtime/Sources/Renderer/Texts/Text.cpp | 78 -
 .../Sources/Renderer/Texts/TextDescriptor.cpp | 107 -
 .../Sources/Renderer/Texts/TextLibrary.cpp | 62 -
 .../Sources/Renderer/Texts/TextManager.cpp | 65 -
 .../Sources/Renderer/Vulkan/VulkanLoader.cpp | 416 +
 .../Sources/Renderer/Vulkan/VulkanLoader.h | 42 +
 scripts/fetch_dependencies.sh | 17 +-
 third_party/glm/common.hpp | 539 -
 third_party/glm/detail/_features.hpp | 394 -
 third_party/glm/detail/_fixes.hpp | 27 -
 third_party/glm/detail/_noise.hpp | 81 -
 third_party/glm/detail/_swizzle.hpp | 804 -
 third_party/glm/detail/_swizzle_func.hpp | 682 -
 third_party/glm/detail/_vectorize.hpp | 162 -
 third_party/glm/detail/compute_common.hpp | 50 -
 .../glm/detail/compute_vector_relational.hpp | 30 -
 third_party/glm/detail/func_common.inl | 792 -
 third_party/glm/detail/func_common_simd.inl | 231 -
 third_party/glm/detail/func_exponential.inl | 152 -
 .../glm/detail/func_exponential_simd.inl | 37 -
 third_party/glm/detail/func_geometric.inl | 243 -
 .../glm/detail/func_geometric_simd.inl | 165 -
 third_party/glm/detail/func_integer.inl | 372 -
 third_party/glm/detail/func_integer_simd.inl | 65 -
 third_party/glm/detail/func_matrix.inl | 398 -
 third_party/glm/detail/func_matrix_simd.inl | 249 -
 third_party/glm/detail/func_packing.inl | 189 -
 third_party/glm/detail/func_packing_simd.inl | 6 -
 third_party/glm/detail/func_trigonometric.inl | 197 -
 .../glm/detail/func_trigonometric_simd.inl | 0
 .../glm/detail/func_vector_relational.inl | 87 -
 .../detail/func_vector_relational_simd.inl | 6 -
 third_party/glm/detail/glm.cpp | 263 -
 third_party/glm/detail/qualifier.hpp | 230 -
 third_party/glm/detail/setup.hpp | 1135 -
 third_party/glm/detail/type_float.hpp | 68 -
 third_party/glm/detail/type_half.hpp | 16 -
 third_party/glm/detail/type_half.inl | 241 -
 third_party/glm/detail/type_mat2x2.hpp | 177 -
 third_party/glm/detail/type_mat2x2.inl | 536 -
 third_party/glm/detail/type_mat2x3.hpp | 159 -
 third_party/glm/detail/type_mat2x3.inl | 510 -
 third_party/glm/detail/type_mat2x4.hpp | 161 -
 third_party/glm/detail/type_mat2x4.inl | 520 -
 third_party/glm/detail/type_mat3x2.hpp | 167 -
 third_party/glm/detail/type_mat3x2.inl | 532 -
 third_party/glm/detail/type_mat3x3.hpp | 184 -
 third_party/glm/detail/type_mat3x3.inl | 601 -
 third_party/glm/detail/type_mat3x4.hpp | 166 -
 third_party/glm/detail/type_mat3x4.inl | 578 -
 third_party/glm/detail/type_mat4x2.hpp | 171 -
 third_party/glm/detail/type_mat4x2.inl | 574 -
 third_party/glm/detail/type_mat4x3.hpp | 171 -
 third_party/glm/detail/type_mat4x3.inl | 598 -
 third_party/glm/detail/type_mat4x4.hpp | 189 -
 third_party/glm/detail/type_mat4x4.inl | 706 -
 third_party/glm/detail/type_mat4x4_simd.inl | 6 -
 third_party/glm/detail/type_quat.hpp | 186 -
 third_party/glm/detail/type_quat.inl | 408 -
 third_party/glm/detail/type_quat_simd.inl | 188 -
 third_party/glm/detail/type_vec1.hpp | 308 -
 third_party/glm/detail/type_vec1.inl | 551 -
 third_party/glm/detail/type_vec2.hpp | 399 -
 third_party/glm/detail/type_vec2.inl | 913 -
 third_party/glm/detail/type_vec3.hpp | 432 -
 third_party/glm/detail/type_vec3.inl | 1068 -
 third_party/glm/detail/type_vec4.hpp | 505 -
 third_party/glm/detail/type_vec4.inl | 1140 -
 third_party/glm/detail/type_vec4_simd.inl | 775 -
 third_party/glm/exponential.hpp | 110 -
 third_party/glm/ext.hpp | 196 -
 third_party/glm/ext/matrix_clip_space.hpp | 522 -
 third_party/glm/ext/matrix_clip_space.inl | 555 -
 third_party/glm/ext/matrix_common.hpp | 36 -
 third_party/glm/ext/matrix_common.inl | 16 -
 third_party/glm/ext/matrix_double2x2.hpp | 23 -
 .../glm/ext/matrix_double2x2_precision.hpp | 49 -
 third_party/glm/ext/matrix_double2x3.hpp | 18 -
 .../glm/ext/matrix_double2x3_precision.hpp | 31 -
 third_party/glm/ext/matrix_double2x4.hpp | 18 -
 .../glm/ext/matrix_double2x4_precision.hpp | 31 -
 third_party/glm/ext/matrix_double3x2.hpp | 18 -
 .../glm/ext/matrix_double3x2_precision.hpp | 31 -
 third_party/glm/ext/matrix_double3x3.hpp | 23 -
 .../glm/ext/matrix_double3x3_precision.hpp | 49 -
 third_party/glm/ext/matrix_double3x4.hpp | 18 -
 .../glm/ext/matrix_double3x4_precision.hpp | 31 -
 third_party/glm/ext/matrix_double4x2.hpp | 18 -
 .../glm/ext/matrix_double4x2_precision.hpp | 31 -
 third_party/glm/ext/matrix_double4x3.hpp | 18 -
 .../glm/ext/matrix_double4x3_precision.hpp | 31 -
 third_party/glm/ext/matrix_double4x4.hpp | 23 -
 .../glm/ext/matrix_double4x4_precision.hpp | 49 -
 third_party/glm/ext/matrix_float2x2.hpp | 23 -
 .../glm/ext/matrix_float2x2_precision.hpp | 49 -
 third_party/glm/ext/matrix_float2x3.hpp | 18 -
 .../glm/ext/matrix_float2x3_precision.hpp | 31 -
 third_party/glm/ext/matrix_float2x4.hpp | 18 -
 .../glm/ext/matrix_float2x4_precision.hpp | 31 -
 third_party/glm/ext/matrix_float3x2.hpp | 18 -
 .../glm/ext/matrix_float3x2_precision.hpp | 31 -
 third_party/glm/ext/matrix_float3x3.hpp | 23 -
 .../glm/ext/matrix_float3x3_precision.hpp | 49 -
 third_party/glm/ext/matrix_float3x4.hpp | 18 -
 .../glm/ext/matrix_float3x4_precision.hpp | 31 -
 third_party/glm/ext/matrix_float4x2.hpp | 18 -
 .../glm/ext/matrix_float4x2_precision.hpp | 31 -
 third_party/glm/ext/matrix_float4x3.hpp | 18 -
 .../glm/ext/matrix_float4x3_precision.hpp | 31 -
 third_party/glm/ext/matrix_float4x4.hpp | 23 -
 .../glm/ext/matrix_float4x4_precision.hpp | 49 -
 third_party/glm/ext/matrix_projection.hpp | 149 -
 third_party/glm/ext/matrix_projection.inl | 104 -
 third_party/glm/ext/matrix_relational.hpp | 132 -
 third_party/glm/ext/matrix_relational.inl | 82 -
 third_party/glm/ext/matrix_transform.hpp | 144 -
 third_party/glm/ext/matrix_transform.inl | 152 -
 third_party/glm/ext/quaternion_common.hpp | 120 -
 third_party/glm/ext/quaternion_common.inl | 107 -
 .../glm/ext/quaternion_common_simd.inl | 18 -
 third_party/glm/ext/quaternion_double.hpp | 39 -
 .../glm/ext/quaternion_double_precision.hpp | 42 -
 .../glm/ext/quaternion_exponential.hpp | 63 -
 .../glm/ext/quaternion_exponential.inl | 85 -
 third_party/glm/ext/quaternion_float.hpp | 39 -
 .../glm/ext/quaternion_float_precision.hpp | 36 -
 third_party/glm/ext/quaternion_geometric.hpp | 70 -
 third_party/glm/ext/quaternion_geometric.inl | 36 -
 third_party/glm/ext/quaternion_relational.hpp | 62 -
 third_party/glm/ext/quaternion_relational.inl | 35 -
 third_party/glm/ext/quaternion_transform.hpp | 47 -
 third_party/glm/ext/quaternion_transform.inl | 24 -
 .../glm/ext/quaternion_trigonometric.hpp | 63 -
 .../glm/ext/quaternion_trigonometric.inl | 34 -
 third_party/glm/ext/scalar_common.hpp | 103 -
 third_party/glm/ext/scalar_common.inl | 115 -
 third_party/glm/ext/scalar_constants.hpp | 40 -
 third_party/glm/ext/scalar_constants.inl | 24 -
 third_party/glm/ext/scalar_int_sized.hpp | 70 -
 third_party/glm/ext/scalar_integer.hpp | 92 -
 third_party/glm/ext/scalar_integer.inl | 243 -
 third_party/glm/ext/scalar_relational.hpp | 65 -
 third_party/glm/ext/scalar_relational.inl | 40 -
 third_party/glm/ext/scalar_uint_sized.hpp | 70 -
 third_party/glm/ext/scalar_ulp.hpp | 74 -
 third_party/glm/ext/scalar_ulp.inl | 284 -
 third_party/glm/ext/vector_bool1.hpp | 30 -
 .../glm/ext/vector_bool1_precision.hpp | 34 -
 third_party/glm/ext/vector_bool2.hpp | 18 -
 .../glm/ext/vector_bool2_precision.hpp | 31 -
 third_party/glm/ext/vector_bool3.hpp | 18 -
 .../glm/ext/vector_bool3_precision.hpp | 31 -
 third_party/glm/ext/vector_bool4.hpp | 18 -
 .../glm/ext/vector_bool4_precision.hpp | 31 -
 third_party/glm/ext/vector_common.hpp | 144 -
 third_party/glm/ext/vector_common.inl | 88 -
 third_party/glm/ext/vector_double1.hpp | 31 -
 .../glm/ext/vector_double1_precision.hpp | 36 -
 third_party/glm/ext/vector_double2.hpp | 18 -
 .../glm/ext/vector_double2_precision.hpp | 31 -
 third_party/glm/ext/vector_double3.hpp | 18 -
 .../glm/ext/vector_double3_precision.hpp | 34 -
 third_party/glm/ext/vector_double4.hpp | 18 -
 .../glm/ext/vector_double4_precision.hpp | 35 -
 third_party/glm/ext/vector_float1.hpp | 31 -
 .../glm/ext/vector_float1_precision.hpp | 36 -
 third_party/glm/ext/vector_float2.hpp | 18 -
 .../glm/ext/vector_float2_precision.hpp | 31 -
 third_party/glm/ext/vector_float3.hpp | 18 -
 .../glm/ext/vector_float3_precision.hpp | 31 -
 third_party/glm/ext/vector_float4.hpp | 18 -
 .../glm/ext/vector_float4_precision.hpp | 31 -
 third_party/glm/ext/vector_int1.hpp | 32 -
 third_party/glm/ext/vector_int1_precision.hpp | 34 -
 third_party/glm/ext/vector_int2.hpp | 18 -
 third_party/glm/ext/vector_int2_precision.hpp | 31 -
 third_party/glm/ext/vector_int3.hpp | 18 -
 third_party/glm/ext/vector_int3_precision.hpp | 31 -
 third_party/glm/ext/vector_int4.hpp | 18 -
 third_party/glm/ext/vector_int4_precision.hpp | 31 -
 third_party/glm/ext/vector_integer.hpp | 149 -
 third_party/glm/ext/vector_integer.inl | 85 -
 third_party/glm/ext/vector_relational.hpp | 107 -
 third_party/glm/ext/vector_relational.inl | 75 -
 third_party/glm/ext/vector_uint1.hpp | 32 -
 .../glm/ext/vector_uint1_precision.hpp | 40 -
 third_party/glm/ext/vector_uint2.hpp | 18 -
 .../glm/ext/vector_uint2_precision.hpp | 31 -
 third_party/glm/ext/vector_uint3.hpp | 18 -
 .../glm/ext/vector_uint3_precision.hpp | 31 -
 third_party/glm/ext/vector_uint4.hpp | 18 -
 .../glm/ext/vector_uint4_precision.hpp | 31 -
 third_party/glm/ext/vector_ulp.hpp | 109 -
 third_party/glm/ext/vector_ulp.inl | 74 -
 third_party/glm/fwd.hpp | 818 -
 third_party/glm/geometric.hpp | 116 -
 third_party/glm/glm.hpp | 136 -
 third_party/glm/gtc/bitfield.hpp | 266 -
 third_party/glm/gtc/bitfield.inl | 626 -
 third_party/glm/gtc/color_space.hpp | 56 -
 third_party/glm/gtc/color_space.inl | 84 -
 third_party/glm/gtc/constants.hpp | 165 -
 third_party/glm/gtc/constants.inl | 167 -
 third_party/glm/gtc/epsilon.hpp | 60 -
 third_party/glm/gtc/epsilon.inl | 80 -
 third_party/glm/gtc/integer.hpp | 65 -
 third_party/glm/gtc/integer.inl | 68 -
 third_party/glm/gtc/matrix_access.hpp | 60 -
 third_party/glm/gtc/matrix_access.inl | 62 -
 third_party/glm/gtc/matrix_integer.hpp | 487 -
 third_party/glm/gtc/matrix_inverse.hpp | 50 -
 third_party/glm/gtc/matrix_inverse.inl | 118 -
 third_party/glm/gtc/matrix_transform.hpp | 36 -
 third_party/glm/gtc/matrix_transform.inl | 3 -
 third_party/glm/gtc/noise.hpp | 61 -
 third_party/glm/gtc/noise.inl | 807 -
 third_party/glm/gtc/packing.hpp | 728 -
 third_party/glm/gtc/packing.inl | 938 -
 third_party/glm/gtc/quaternion.hpp | 173 -
 third_party/glm/gtc/quaternion.inl | 200 -
 third_party/glm/gtc/quaternion_simd.inl | 0
 third_party/glm/gtc/random.hpp | 82 -
 third_party/glm/gtc/random.inl | 303 -
 third_party/glm/gtc/reciprocal.hpp | 135 -
 third_party/glm/gtc/reciprocal.inl | 191 -
 third_party/glm/gtc/round.hpp | 160 -
 third_party/glm/gtc/round.inl | 155 -
 third_party/glm/gtc/type_aligned.hpp | 1315 -
 third_party/glm/gtc/type_precision.hpp | 2138 -
 third_party/glm/gtc/type_precision.inl | 6 -
 third_party/glm/gtc/type_ptr.hpp | 230 -
 third_party/glm/gtc/type_ptr.inl | 386 -
 third_party/glm/gtc/ulp.hpp | 152 -
 third_party/glm/gtc/ulp.inl | 173 -
 third_party/glm/gtc/vec1.hpp | 30 -
 third_party/glm/gtx/associated_min_max.hpp | 207 -
 third_party/glm/gtx/associated_min_max.inl | 354 -
 third_party/glm/gtx/bit.hpp | 98 -
 third_party/glm/gtx/bit.inl | 92 -
 third_party/glm/gtx/closest_point.hpp | 49 -
 third_party/glm/gtx/closest_point.inl | 45 -
 third_party/glm/gtx/color_encoding.hpp | 54 -
 third_party/glm/gtx/color_encoding.inl | 45 -
 third_party/glm/gtx/color_space.hpp | 72 -
 third_party/glm/gtx/color_space.inl | 141 -
 third_party/glm/gtx/color_space_YCoCg.hpp | 60 -
 third_party/glm/gtx/color_space_YCoCg.inl | 107 -
 third_party/glm/gtx/common.hpp | 76 -
 third_party/glm/gtx/common.inl | 125 -
 third_party/glm/gtx/compatibility.hpp | 133 -
 third_party/glm/gtx/compatibility.inl | 62 -
 third_party/glm/gtx/component_wise.hpp | 69 -
 third_party/glm/gtx/component_wise.inl | 127 -
 third_party/glm/gtx/dual_quaternion.hpp | 274 -
 third_party/glm/gtx/dual_quaternion.inl | 352 -
 third_party/glm/gtx/easing.hpp | 219 -
 third_party/glm/gtx/easing.inl | 436 -
 third_party/glm/gtx/euler_angles.hpp | 335 -
 third_party/glm/gtx/euler_angles.inl | 899 -
 third_party/glm/gtx/extend.hpp | 42 -
 third_party/glm/gtx/extend.inl | 48 -
 third_party/glm/gtx/extended_min_max.hpp | 182 -
 third_party/glm/gtx/extended_min_max.inl | 218 -
 third_party/glm/gtx/exterior_product.hpp | 45 -
 third_party/glm/gtx/exterior_product.inl | 26 -
 third_party/glm/gtx/fast_exponential.hpp | 95 -
 third_party/glm/gtx/fast_exponential.inl | 136 -
 third_party/glm/gtx/fast_square_root.hpp | 92 -
 third_party/glm/gtx/fast_square_root.inl | 75 -
 third_party/glm/gtx/fast_trigonometry.hpp | 79 -
 third_party/glm/gtx/fast_trigonometry.inl | 142 -
 third_party/glm/gtx/float_notmalize.inl | 13 -
 third_party/glm/gtx/functions.hpp | 56 -
 third_party/glm/gtx/functions.inl | 30 -
 third_party/glm/gtx/gradient_paint.hpp | 53 -
 third_party/glm/gtx/gradient_paint.inl | 36 -
 .../glm/gtx/handed_coordinate_space.hpp | 50 -
 .../glm/gtx/handed_coordinate_space.inl | 26 -
 third_party/glm/gtx/hash.hpp | 142 -
 third_party/glm/gtx/hash.inl | 184 -
 third_party/glm/gtx/integer.hpp | 76 -
 third_party/glm/gtx/integer.inl | 185 -
 third_party/glm/gtx/intersect.hpp | 92 -
 third_party/glm/gtx/intersect.inl | 200 -
 third_party/glm/gtx/io.hpp | 201 -
 third_party/glm/gtx/io.inl | 440 -
 third_party/glm/gtx/log_base.hpp | 48 -
 third_party/glm/gtx/log_base.inl | 16 -
 third_party/glm/gtx/matrix_cross_product.hpp | 47 -
 third_party/glm/gtx/matrix_cross_product.inl | 37 -
 third_party/glm/gtx/matrix_decompose.hpp | 46 -
 third_party/glm/gtx/matrix_decompose.inl | 186 -
 third_party/glm/gtx/matrix_factorisation.hpp | 69 -
 third_party/glm/gtx/matrix_factorisation.inl | 84 -
 third_party/glm/gtx/matrix_interpolation.hpp | 60 -
 third_party/glm/gtx/matrix_interpolation.inl | 129 -
 third_party/glm/gtx/matrix_major_storage.hpp | 119 -
 third_party/glm/gtx/matrix_major_storage.inl | 166 -
 third_party/glm/gtx/matrix_operation.hpp | 103 -
 third_party/glm/gtx/matrix_operation.inl | 176 -
 third_party/glm/gtx/matrix_query.hpp | 77 -
 third_party/glm/gtx/matrix_query.inl | 113 -
 third_party/glm/gtx/matrix_transform_2d.hpp | 81 -
 third_party/glm/gtx/matrix_transform_2d.inl | 68 -
 third_party/glm/gtx/mixed_product.hpp | 41 -
 third_party/glm/gtx/mixed_product.inl | 15 -
 third_party/glm/gtx/norm.hpp | 88 -
 third_party/glm/gtx/norm.inl | 95 -
 third_party/glm/gtx/normal.hpp | 41 -
 third_party/glm/gtx/normal.inl | 15 -
 third_party/glm/gtx/normalize_dot.hpp | 49 -
 third_party/glm/gtx/normalize_dot.inl | 16 -
 third_party/glm/gtx/number_precision.hpp | 61 -
 third_party/glm/gtx/number_precision.inl | 6 -
 third_party/glm/gtx/optimum_pow.hpp | 54 -
 third_party/glm/gtx/optimum_pow.inl | 22 -
 third_party/glm/gtx/orthonormalize.hpp | 49 -
 third_party/glm/gtx/orthonormalize.inl | 29 -
 third_party/glm/gtx/perpendicular.hpp | 41 -
 third_party/glm/gtx/perpendicular.inl | 10 -
 third_party/glm/gtx/polar_coordinates.hpp | 48 -
 third_party/glm/gtx/polar_coordinates.inl | 36 -
 third_party/glm/gtx/projection.hpp | 43 -
 third_party/glm/gtx/projection.inl | 10 -
 third_party/glm/gtx/quaternion.hpp | 174 -
 third_party/glm/gtx/quaternion.inl | 159 -
 third_party/glm/gtx/range.hpp | 98 -
 third_party/glm/gtx/raw_data.hpp | 51 -
 third_party/glm/gtx/raw_data.inl | 2 -
 .../glm/gtx/rotate_normalized_axis.hpp | 68 -
 .../glm/gtx/rotate_normalized_axis.inl | 58 -
 third_party/glm/gtx/rotate_vector.hpp | 123 -
 third_party/glm/gtx/rotate_vector.inl | 187 -
 third_party/glm/gtx/scalar_multiplication.hpp | 75 -
 third_party/glm/gtx/scalar_relational.hpp | 36 -
 third_party/glm/gtx/scalar_relational.inl | 88 -
 third_party/glm/gtx/spline.hpp | 65 -
 third_party/glm/gtx/spline.inl | 60 -
 third_party/glm/gtx/std_based_type.hpp | 68 -
 third_party/glm/gtx/std_based_type.inl | 6 -
 third_party/glm/gtx/string_cast.hpp | 52 -
 third_party/glm/gtx/string_cast.inl | 492 -
 third_party/glm/gtx/texture.hpp | 46 -
 third_party/glm/gtx/texture.inl | 17 -
 third_party/glm/gtx/transform.hpp | 60 -
 third_party/glm/gtx/transform.inl | 23 -
 third_party/glm/gtx/transform2.hpp | 89 -
 third_party/glm/gtx/transform2.inl | 125 -
 third_party/glm/gtx/type_aligned.hpp | 982 -
 third_party/glm/gtx/type_aligned.inl | 6 -
 third_party/glm/gtx/type_trait.hpp | 85 -
 third_party/glm/gtx/type_trait.inl | 61 -
 third_party/glm/gtx/vec_swizzle.hpp | 2782 --
 third_party/glm/gtx/vector_angle.hpp | 57 -
 third_party/glm/gtx/vector_angle.inl | 44 -
 third_party/glm/gtx/vector_query.hpp | 66 -
 third_party/glm/gtx/vector_query.inl | 154 -
 third_party/glm/gtx/wrap.hpp | 55 -
 third_party/glm/gtx/wrap.inl | 57 -
 third_party/glm/integer.hpp | 212 -
 third_party/glm/mat2x2.hpp | 9 -
 third_party/glm/mat2x3.hpp | 9 -
 third_party/glm/mat2x4.hpp | 9 -
 third_party/glm/mat3x2.hpp | 9 -
 third_party/glm/mat3x3.hpp | 8 -
 third_party/glm/mat3x4.hpp | 8 -
 third_party/glm/mat4x2.hpp | 9 -
 third_party/glm/mat4x3.hpp | 8 -
 third_party/glm/mat4x4.hpp | 9 -
 third_party/glm/matrix.hpp | 161 -
 third_party/glm/packing.hpp | 173 -
 third_party/glm/simd/common.h | 240 -
 third_party/glm/simd/exponential.h | 20 -
 third_party/glm/simd/geometric.h | 124 -
 third_party/glm/simd/integer.h | 115 -
 third_party/glm/simd/matrix.h | 1028 -
 third_party/glm/simd/neon.h | 155 -
 third_party/glm/simd/packing.h | 8 -
 third_party/glm/simd/platform.h | 398 -
 third_party/glm/simd/trigonometric.h | 9 -
 third_party/glm/simd/vector_relational.h | 8 -
 third_party/glm/trigonometric.hpp | 210 -
 third_party/glm/vec2.hpp | 14 -
 third_party/glm/vec3.hpp | 14 -
 third_party/glm/vec4.hpp | 15 -
 third_party/glm/vector_relational.hpp | 121 -
 third_party/kvf.h | 2334 +
 third_party/vma.h | 38234 ++++++++--------
 third_party/volk.c | 3041 --
 third_party/volk.h | 1985 -
 third_party/vulkan/vulkan.cppm | 148 +-
 third_party/vulkan/vulkan.hpp | 1128 +-
 third_party/vulkan/vulkan_core.h | 436 +-
 third_party/vulkan/vulkan_enums.hpp | 622 +-
 .../vulkan/vulkan_extension_inspection.hpp | 88 +-
 third_party/vulkan/vulkan_format_traits.hpp | 18 +-
 third_party/vulkan/vulkan_funcs.hpp | 2472 +-
 third_party/vulkan/vulkan_handles.hpp | 1326 +-
 third_party/vulkan/vulkan_hash.hpp | 571 +-
 third_party/vulkan/vulkan_metal.h | 12 +-
 third_party/vulkan/vulkan_raii.hpp | 1405 +-
 third_party/vulkan/vulkan_shared.hpp | 50 +-
 .../vulkan/vulkan_static_assertions.hpp | 301 +-
 third_party/vulkan/vulkan_structs.hpp | 13556 +++---
 third_party/vulkan/vulkan_to_string.hpp | 184 +-
 581 files changed, 42971 insertions(+), 99170 deletions(-)
 create mode 100644 runtime/Includes/Embedded/2DFragment.nzsl
 create mode 100644 runtime/Includes/Embedded/2DFragment.spv.h
 create mode 100644 runtime/Includes/Embedded/2DVertex.nzsl
 create mode 100644 runtime/Includes/Embedded/2DVertex.spv.h
 rename runtime/Includes/{Utils => Embedded}/DogicaTTF.h (99%)
 rename runtime/Includes/{Utils => Embedded}/IconMlx.h (99%)
 create mode 100644 runtime/Includes/Embedded/ScreenFragment.nzsl
 create mode 100644 runtime/Includes/Embedded/ScreenFragment.spv.h
 create mode 100644 runtime/Includes/Embedded/ScreenVertex.nzsl
 create mode 100644 runtime/Includes/Embedded/ScreenVertex.spv.h
 create mode 100644 runtime/Includes/Graphics/Mesh.h
 create mode 100644 runtime/Includes/Graphics/Scene.h
 create mode 100644 runtime/Includes/Graphics/Sprite.h
 create mode 100644 runtime/Includes/Maths/Angles.h
 create mode 100644 runtime/Includes/Maths/Angles.inl
 create mode 100644 runtime/Includes/Maths/Constants.h
 create mode 100644 runtime/Includes/Maths/Enums.h
 create mode 100644 runtime/Includes/Maths/EulerAngles.h
 create mode 100644 runtime/Includes/Maths/EulerAngles.inl
 create mode 100644 runtime/Includes/Maths/Mat4.h
 create mode 100644 runtime/Includes/Maths/Mat4.inl
 create mode 100644 runtime/Includes/Maths/MathsUtils.h
 create mode 100644 runtime/Includes/Maths/MathsUtils.inl
 create mode 100644 runtime/Includes/Maths/Quaternions.h
 create mode 100644 runtime/Includes/Maths/Quaternions.inl
 create mode 100755 runtime/Includes/Maths/Vec2.h
 create mode 100755 runtime/Includes/Maths/Vec2.inl
 create mode 100755 runtime/Includes/Maths/Vec3.h
 create mode 100755 runtime/Includes/Maths/Vec3.inl
 create mode 100755 runtime/Includes/Maths/Vec4.h
 create mode 100755 runtime/Includes/Maths/Vec4.inl
 create mode 100644 runtime/Includes/Renderer/Buffer.h
 delete mode 100644 runtime/Includes/Renderer/Buffers/Buffer.h
 delete mode 100644 runtime/Includes/Renderer/Buffers/IndexBuffer.h
 delete mode 100644 runtime/Includes/Renderer/Buffers/UniformBuffer.h
 delete mode 100644 runtime/Includes/Renderer/Buffers/VertexBuffer.h
 delete mode 100644 runtime/Includes/Renderer/Command/CommandBuffer.h
 delete mode 100644 runtime/Includes/Renderer/Command/CommandManager.h
 delete mode 100644 runtime/Includes/Renderer/Command/CommandPool.h
 delete mode 100644 runtime/Includes/Renderer/Command/CommandResource.h
 delete mode 100644 runtime/Includes/Renderer/Command/SingleTimeCmdManager.h
 delete mode 100644 runtime/Includes/Renderer/Core/Device.h
 delete mode 100644 runtime/Includes/Renderer/Core/DrawableResource.h
 delete mode 100644 runtime/Includes/Renderer/Core/Fence.h
 delete mode 100644 runtime/Includes/Renderer/Core/Instance.h
 delete mode 100644 runtime/Includes/Renderer/Core/Queues.h
 delete mode 100644 runtime/Includes/Renderer/Core/RenderCore.h
 delete mode 100644 runtime/Includes/Renderer/Core/Semaphore.h
 delete mode 100644 runtime/Includes/Renderer/Core/Surface.h
 delete mode 100644 runtime/Includes/Renderer/Core/ValidationLayers.h
 create mode 100644 runtime/Includes/Renderer/Descriptor.h
 delete mode 100644 runtime/Includes/Renderer/Descriptors/DescriptorPool.h
 delete mode 100644 runtime/Includes/Renderer/Descriptors/DescriptorPoolManager.h
 delete mode 100644 runtime/Includes/Renderer/Descriptors/DescriptorSet.h
 delete mode 100644 runtime/Includes/Renderer/Descriptors/DescriptorSetLayout.h
 create mode 100644 runtime/Includes/Renderer/Image.h
 delete mode 100644 runtime/Includes/Renderer/Images/Image.h
 delete mode 100644 runtime/Includes/Renderer/Images/Texture.h
 delete mode 100644 runtime/Includes/Renderer/Images/TextureAtlas.h
 delete mode 100644 runtime/Includes/Renderer/Images/TextureDescriptor.h
 delete mode 100644 runtime/Includes/Renderer/Images/TextureRegistry.h
 delete mode 100644 runtime/Includes/Renderer/Images/TextureRegistry.inl
 rename runtime/Includes/Renderer/{Core => }/Memory.h (50%)
 create mode 100644 runtime/Includes/Renderer/Pipelines/Graphics.h
 create mode 100644 runtime/Includes/Renderer/Pipelines/Shader.h
 delete mode 100644 runtime/Includes/Renderer/PixelPut.h
 create mode 100644 runtime/Includes/Renderer/RenderCore.h
 create mode 100644 runtime/Includes/Renderer/RenderPasses/2DPass.h
 create mode 100644 runtime/Includes/Renderer/RenderPasses/FinalPass.h
 create mode 100644 runtime/Includes/Renderer/RenderPasses/Passes.h
 delete mode 100644 runtime/Includes/Renderer/Renderpass/FrameBuffer.h
 delete mode 100644 runtime/Includes/Renderer/Renderpass/RenderPass.h
 delete mode 100644 runtime/Includes/Renderer/Renderpass/Swapchain.h
 create mode 100644 runtime/Includes/Renderer/ScenesRenderer.h
 delete mode 100644 runtime/Includes/Renderer/Texts/Font.h
 delete mode 100644 runtime/Includes/Renderer/Texts/FontLibrary.h
 delete mode 100644 runtime/Includes/Renderer/Texts/Text.h
 delete mode 100644 runtime/Includes/Renderer/Texts/TextDescriptor.h
 delete mode 100644 runtime/Includes/Renderer/Texts/TextLibrary.h
 delete mode 100644 runtime/Includes/Renderer/Texts/TextManager.h
 create mode 100644 runtime/Includes/Renderer/Vertex.inl
 create mode 100644 runtime/Includes/Renderer/ViewerData.h
 create mode 100644 runtime/Includes/Renderer/Vulkan/VulkanPrototypes.h
 create mode 100644 runtime/Includes/Utils/Buffer.h
 create mode 100644 runtime/Sources/Graphics/Mesh.cpp
 create mode 100644 runtime/Sources/Graphics/Scene.cpp
 create mode 100644 runtime/Sources/Graphics/Sprite.cpp
 create mode 100644 runtime/Sources/Renderer/Buffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Buffers/Buffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Buffers/UniformBuffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Buffers/VertexBuffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Command/CommandBuffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Command/CommandManager.cpp
 delete mode 100644 runtime/Sources/Renderer/Command/CommandPool.cpp
 delete mode 100644 runtime/Sources/Renderer/Command/SingleTimeCommandManager.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Device.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Fence.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Instance.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Memory.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Queues.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/RenderCore.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Semaphore.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/Surface.cpp
 delete mode 100644 runtime/Sources/Renderer/Core/ValidationLayers.cpp
 create mode 100644 runtime/Sources/Renderer/Descriptor.cpp
 delete mode 100644 runtime/Sources/Renderer/Descriptors/DescriptorPool.cpp
 delete mode 100644 runtime/Sources/Renderer/Descriptors/DescriptorPoolManager.cpp
 delete mode 100644 runtime/Sources/Renderer/Descriptors/DescriptorSet.cpp
 delete mode 100644 runtime/Sources/Renderer/Descriptors/DescriptorSetLayout.cpp
 create mode 100644 runtime/Sources/Renderer/Image.cpp
 delete mode 100644 runtime/Sources/Renderer/Images/Image.cpp
 delete mode 100644 runtime/Sources/Renderer/Images/Texture.cpp
 delete mode 100644 runtime/Sources/Renderer/Images/TextureAtlas.cpp
 create mode 100644 runtime/Sources/Renderer/Memory.cpp
 create mode 100644 runtime/Sources/Renderer/Pipelines/Graphics.cpp
 delete mode 100644 runtime/Sources/Renderer/Pipelines/Pipeline.cpp
 create mode 100644 runtime/Sources/Renderer/Pipelines/Shader.cpp
 delete mode 100644 runtime/Sources/Renderer/PixelPut.cpp
 create mode 100644 runtime/Sources/Renderer/RenderCore.cpp
 create mode 100644 runtime/Sources/Renderer/RenderPasses/2DPass.cpp
 create mode 100644 runtime/Sources/Renderer/RenderPasses/FinalPass.cpp
 create mode 100644 runtime/Sources/Renderer/RenderPasses/Passes.cpp
 delete mode 100644 runtime/Sources/Renderer/Renderpass/Framebuffer.cpp
 delete mode 100644 runtime/Sources/Renderer/Renderpass/Renderpass.cpp
 delete mode 100644 runtime/Sources/Renderer/Renderpass/Swapchain.cpp
 create mode 100644 runtime/Sources/Renderer/SceneRenderer.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/Font.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/FontLibrary.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/Text.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/TextDescriptor.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/TextLibrary.cpp
 delete mode 100644 runtime/Sources/Renderer/Texts/TextManager.cpp
 create mode 100644 runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp
 create mode 100644 runtime/Sources/Renderer/Vulkan/VulkanLoader.h
 delete mode 100755 third_party/glm/common.hpp
 delete mode 100755 third_party/glm/detail/_features.hpp
 delete mode 100755 third_party/glm/detail/_fixes.hpp
 delete mode 100755 third_party/glm/detail/_noise.hpp
 delete mode 100755 third_party/glm/detail/_swizzle.hpp
 delete mode 100755 third_party/glm/detail/_swizzle_func.hpp
 delete mode 100755 third_party/glm/detail/_vectorize.hpp
 delete mode 100755 third_party/glm/detail/compute_common.hpp
 delete mode 100755 third_party/glm/detail/compute_vector_relational.hpp
 delete mode 100755 third_party/glm/detail/func_common.inl
 delete mode 100755 third_party/glm/detail/func_common_simd.inl
 delete mode 100755 third_party/glm/detail/func_exponential.inl
 delete mode 100755 third_party/glm/detail/func_exponential_simd.inl
 delete mode 100755 third_party/glm/detail/func_geometric.inl
 delete mode 100755 third_party/glm/detail/func_geometric_simd.inl
 delete mode 100755 third_party/glm/detail/func_integer.inl
 delete mode 100755 third_party/glm/detail/func_integer_simd.inl
 delete mode 100755 third_party/glm/detail/func_matrix.inl
 delete mode 100755 third_party/glm/detail/func_matrix_simd.inl
 delete mode 100755 third_party/glm/detail/func_packing.inl
 delete mode 100755 third_party/glm/detail/func_packing_simd.inl
 delete mode 100755 third_party/glm/detail/func_trigonometric.inl
 delete mode 100755 third_party/glm/detail/func_trigonometric_simd.inl
 delete mode 100755 third_party/glm/detail/func_vector_relational.inl
 delete mode 100755 third_party/glm/detail/func_vector_relational_simd.inl
 delete mode 100755 third_party/glm/detail/glm.cpp
 delete mode 100755 third_party/glm/detail/qualifier.hpp
 delete mode 100755 third_party/glm/detail/setup.hpp
 delete mode 100755 third_party/glm/detail/type_float.hpp
 delete mode 100755 third_party/glm/detail/type_half.hpp
 delete mode 100755 third_party/glm/detail/type_half.inl
 delete mode 100755 third_party/glm/detail/type_mat2x2.hpp
 delete mode 100755 third_party/glm/detail/type_mat2x2.inl
 delete mode 100755 third_party/glm/detail/type_mat2x3.hpp
 delete mode 100755 third_party/glm/detail/type_mat2x3.inl
 delete mode 100755 third_party/glm/detail/type_mat2x4.hpp
 delete mode 100755 third_party/glm/detail/type_mat2x4.inl
 delete mode 100755 third_party/glm/detail/type_mat3x2.hpp
 delete mode 100755 third_party/glm/detail/type_mat3x2.inl
 delete mode 100755 third_party/glm/detail/type_mat3x3.hpp
 delete mode 100755 third_party/glm/detail/type_mat3x3.inl
 delete mode 100755 third_party/glm/detail/type_mat3x4.hpp
 delete mode 100755 third_party/glm/detail/type_mat3x4.inl
 delete mode 100755 third_party/glm/detail/type_mat4x2.hpp
 delete mode 100755 third_party/glm/detail/type_mat4x2.inl
 delete mode 100755 third_party/glm/detail/type_mat4x3.hpp
 delete mode 100755 third_party/glm/detail/type_mat4x3.inl
 delete mode 100755 third_party/glm/detail/type_mat4x4.hpp
 delete mode 100755 third_party/glm/detail/type_mat4x4.inl
 delete mode 100755 third_party/glm/detail/type_mat4x4_simd.inl
 delete mode 100755 third_party/glm/detail/type_quat.hpp
 delete mode 100755 third_party/glm/detail/type_quat.inl
 delete mode 100755 third_party/glm/detail/type_quat_simd.inl
 delete mode 100755 third_party/glm/detail/type_vec1.hpp
 delete mode 100755 third_party/glm/detail/type_vec1.inl
 delete mode 100755 third_party/glm/detail/type_vec2.hpp
 delete mode 100755 third_party/glm/detail/type_vec2.inl
 delete mode 100755 third_party/glm/detail/type_vec3.hpp
 delete mode 100755 third_party/glm/detail/type_vec3.inl
 delete mode 100755 third_party/glm/detail/type_vec4.hpp
 delete mode 100755 third_party/glm/detail/type_vec4.inl
 delete mode 100755 third_party/glm/detail/type_vec4_simd.inl
 delete mode 100755 third_party/glm/exponential.hpp
 delete mode 100755 third_party/glm/ext.hpp
 delete mode 100755 third_party/glm/ext/matrix_clip_space.hpp
 delete mode 100755 third_party/glm/ext/matrix_clip_space.inl
 delete mode 100755 third_party/glm/ext/matrix_common.hpp
 delete mode 100755 third_party/glm/ext/matrix_common.inl
 delete mode 100755 third_party/glm/ext/matrix_double2x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_double2x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double2x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_double2x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double2x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_double2x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_double3x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_double4x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_float2x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_float3x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x2.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x2_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x3.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x3_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x4.hpp
 delete mode 100755 third_party/glm/ext/matrix_float4x4_precision.hpp
 delete mode 100755 third_party/glm/ext/matrix_projection.hpp
 delete mode 100755 third_party/glm/ext/matrix_projection.inl
 delete mode 100755 third_party/glm/ext/matrix_relational.hpp
 delete mode 100755 third_party/glm/ext/matrix_relational.inl
 delete mode 100755 third_party/glm/ext/matrix_transform.hpp
 delete mode 100755 third_party/glm/ext/matrix_transform.inl
 delete mode 100755 third_party/glm/ext/quaternion_common.hpp
 delete mode 100755 third_party/glm/ext/quaternion_common.inl
 delete mode 100755 third_party/glm/ext/quaternion_common_simd.inl
 delete mode 100755 third_party/glm/ext/quaternion_double.hpp
 delete mode 100755 third_party/glm/ext/quaternion_double_precision.hpp
 delete mode 100755 third_party/glm/ext/quaternion_exponential.hpp
 delete mode 100755 third_party/glm/ext/quaternion_exponential.inl
 delete mode 100755 third_party/glm/ext/quaternion_float.hpp
 delete mode 100755 third_party/glm/ext/quaternion_float_precision.hpp
 delete mode 100755 third_party/glm/ext/quaternion_geometric.hpp
 delete mode 100755 third_party/glm/ext/quaternion_geometric.inl
 delete mode 100755 third_party/glm/ext/quaternion_relational.hpp
 delete mode 100755 third_party/glm/ext/quaternion_relational.inl
 delete mode 100755 third_party/glm/ext/quaternion_transform.hpp
 delete mode 100755 third_party/glm/ext/quaternion_transform.inl
 delete mode 100755 third_party/glm/ext/quaternion_trigonometric.hpp
 delete mode 100755 third_party/glm/ext/quaternion_trigonometric.inl
 delete mode 100755 third_party/glm/ext/scalar_common.hpp
 delete mode 100755 third_party/glm/ext/scalar_common.inl
 delete mode 100755 third_party/glm/ext/scalar_constants.hpp
 delete mode 100755 third_party/glm/ext/scalar_constants.inl
 delete mode 100755 third_party/glm/ext/scalar_int_sized.hpp
 delete mode 100755 third_party/glm/ext/scalar_integer.hpp
 delete mode 100755 third_party/glm/ext/scalar_integer.inl
 delete mode 100755 third_party/glm/ext/scalar_relational.hpp
 delete mode 100755 third_party/glm/ext/scalar_relational.inl
 delete mode 100755 third_party/glm/ext/scalar_uint_sized.hpp
 delete mode 100755 third_party/glm/ext/scalar_ulp.hpp
 delete mode 100755 third_party/glm/ext/scalar_ulp.inl
 delete mode 100755 third_party/glm/ext/vector_bool1.hpp
 delete mode 100755 third_party/glm/ext/vector_bool1_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_bool2.hpp
 delete mode 100755 third_party/glm/ext/vector_bool2_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_bool3.hpp
 delete mode 100755 third_party/glm/ext/vector_bool3_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_bool4.hpp
 delete mode 100755 third_party/glm/ext/vector_bool4_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_common.hpp
 delete mode 100755 third_party/glm/ext/vector_common.inl
 delete mode 100755 third_party/glm/ext/vector_double1.hpp
 delete mode 100755 third_party/glm/ext/vector_double1_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_double2.hpp
 delete mode 100755 third_party/glm/ext/vector_double2_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_double3.hpp
 delete mode 100755 third_party/glm/ext/vector_double3_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_double4.hpp
 delete mode 100755 third_party/glm/ext/vector_double4_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_float1.hpp
 delete mode 100755 third_party/glm/ext/vector_float1_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_float2.hpp
 delete mode 100755 third_party/glm/ext/vector_float2_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_float3.hpp
 delete mode 100755 third_party/glm/ext/vector_float3_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_float4.hpp
 delete mode 100755 third_party/glm/ext/vector_float4_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_int1.hpp
 delete mode 100755 third_party/glm/ext/vector_int1_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_int2.hpp
 delete mode 100755 third_party/glm/ext/vector_int2_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_int3.hpp
 delete mode 100755 third_party/glm/ext/vector_int3_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_int4.hpp
 delete mode 100755 third_party/glm/ext/vector_int4_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_integer.hpp
 delete mode 100755 third_party/glm/ext/vector_integer.inl
 delete mode 100755 third_party/glm/ext/vector_relational.hpp
 delete mode 100755 third_party/glm/ext/vector_relational.inl
 delete mode 100755 third_party/glm/ext/vector_uint1.hpp
 delete mode 100755 third_party/glm/ext/vector_uint1_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_uint2.hpp
 delete mode 100755 third_party/glm/ext/vector_uint2_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_uint3.hpp
 delete mode 100755 third_party/glm/ext/vector_uint3_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_uint4.hpp
 delete mode 100755 third_party/glm/ext/vector_uint4_precision.hpp
 delete mode 100755 third_party/glm/ext/vector_ulp.hpp
 delete mode 100755 third_party/glm/ext/vector_ulp.inl
 delete mode 100755 third_party/glm/fwd.hpp
 delete mode 100755 third_party/glm/geometric.hpp
 delete mode 100755 third_party/glm/glm.hpp
 delete mode 100755 third_party/glm/gtc/bitfield.hpp
 delete mode 100755 third_party/glm/gtc/bitfield.inl
 delete mode 100755 third_party/glm/gtc/color_space.hpp
 delete mode 100755 third_party/glm/gtc/color_space.inl
 delete mode 100755 third_party/glm/gtc/constants.hpp
 delete mode 100755 third_party/glm/gtc/constants.inl
 delete mode 100755 third_party/glm/gtc/epsilon.hpp
 delete mode 100755 third_party/glm/gtc/epsilon.inl
 delete mode 100755 third_party/glm/gtc/integer.hpp
 delete mode 100755 third_party/glm/gtc/integer.inl
 delete mode 100755 third_party/glm/gtc/matrix_access.hpp
 delete mode 100755 third_party/glm/gtc/matrix_access.inl
 delete mode 100755 third_party/glm/gtc/matrix_integer.hpp
 delete mode 100755 third_party/glm/gtc/matrix_inverse.hpp
 delete mode 100755 third_party/glm/gtc/matrix_inverse.inl
 delete mode 100755 third_party/glm/gtc/matrix_transform.hpp
 delete mode 100755 third_party/glm/gtc/matrix_transform.inl
 delete mode 100755 third_party/glm/gtc/noise.hpp
 delete mode 100755 third_party/glm/gtc/noise.inl
 delete mode 100755 third_party/glm/gtc/packing.hpp
 delete mode 100755 third_party/glm/gtc/packing.inl
 delete mode 100755 third_party/glm/gtc/quaternion.hpp
 delete mode 100755 third_party/glm/gtc/quaternion.inl
 delete mode 100755 third_party/glm/gtc/quaternion_simd.inl
 delete mode 100755 third_party/glm/gtc/random.hpp
 delete mode 100755 third_party/glm/gtc/random.inl
 delete mode 100755 third_party/glm/gtc/reciprocal.hpp
 delete mode 100755 third_party/glm/gtc/reciprocal.inl
 delete mode 100755 third_party/glm/gtc/round.hpp
 delete mode 100755 third_party/glm/gtc/round.inl
 delete mode 100755 third_party/glm/gtc/type_aligned.hpp
 delete mode 100755 third_party/glm/gtc/type_precision.hpp
 delete mode 100755 third_party/glm/gtc/type_precision.inl
 delete mode 100755 third_party/glm/gtc/type_ptr.hpp
 delete mode 100755 third_party/glm/gtc/type_ptr.inl
 delete mode 100755 third_party/glm/gtc/ulp.hpp
 delete mode 100755 third_party/glm/gtc/ulp.inl
 delete mode 100755 third_party/glm/gtc/vec1.hpp
 delete mode 100755 third_party/glm/gtx/associated_min_max.hpp
 delete mode 100755 third_party/glm/gtx/associated_min_max.inl
 delete mode 100755 third_party/glm/gtx/bit.hpp
 delete mode 100755 third_party/glm/gtx/bit.inl
 delete mode 100755 third_party/glm/gtx/closest_point.hpp
 delete mode 100755 third_party/glm/gtx/closest_point.inl
 delete mode 100755 third_party/glm/gtx/color_encoding.hpp
 delete mode 100755 third_party/glm/gtx/color_encoding.inl
 delete mode 100755 third_party/glm/gtx/color_space.hpp
 delete mode 100755 third_party/glm/gtx/color_space.inl
 delete mode 100755 third_party/glm/gtx/color_space_YCoCg.hpp
 delete mode 100755 third_party/glm/gtx/color_space_YCoCg.inl
 delete mode 100755 third_party/glm/gtx/common.hpp
 delete mode 100755 third_party/glm/gtx/common.inl
 delete mode 100755 third_party/glm/gtx/compatibility.hpp
 delete mode 100755 third_party/glm/gtx/compatibility.inl
 delete mode 100755 third_party/glm/gtx/component_wise.hpp
 delete mode 100755 third_party/glm/gtx/component_wise.inl
 delete mode 100755 third_party/glm/gtx/dual_quaternion.hpp
 delete mode 100755 third_party/glm/gtx/dual_quaternion.inl
 delete mode 100755 third_party/glm/gtx/easing.hpp
 delete mode 100755 third_party/glm/gtx/easing.inl
 delete mode 100755 third_party/glm/gtx/euler_angles.hpp
 delete mode 100755 third_party/glm/gtx/euler_angles.inl
 delete mode 100755 third_party/glm/gtx/extend.hpp
 delete mode 100755 third_party/glm/gtx/extend.inl
 delete mode 100755 third_party/glm/gtx/extended_min_max.hpp
 delete mode 100755 third_party/glm/gtx/extended_min_max.inl
 delete mode 100755 third_party/glm/gtx/exterior_product.hpp
 delete mode 100755 third_party/glm/gtx/exterior_product.inl
 delete mode 100755 third_party/glm/gtx/fast_exponential.hpp
 delete mode 100755 third_party/glm/gtx/fast_exponential.inl
 delete mode 100755 third_party/glm/gtx/fast_square_root.hpp
 delete mode 100755 third_party/glm/gtx/fast_square_root.inl
 delete mode 100755 third_party/glm/gtx/fast_trigonometry.hpp
 delete mode 100755 third_party/glm/gtx/fast_trigonometry.inl
 delete mode 100755 third_party/glm/gtx/float_notmalize.inl
 delete mode 100755 third_party/glm/gtx/functions.hpp
 delete mode 100755 third_party/glm/gtx/functions.inl
 delete mode 100755 third_party/glm/gtx/gradient_paint.hpp
 delete mode 100755 third_party/glm/gtx/gradient_paint.inl
 delete mode 100755 third_party/glm/gtx/handed_coordinate_space.hpp
 delete mode 100755 third_party/glm/gtx/handed_coordinate_space.inl
 delete mode 100755 third_party/glm/gtx/hash.hpp
 delete mode 100755 third_party/glm/gtx/hash.inl
 delete mode 100755 third_party/glm/gtx/integer.hpp
 delete mode 100755 third_party/glm/gtx/integer.inl
 delete mode 100755 third_party/glm/gtx/intersect.hpp
 delete mode 100755 third_party/glm/gtx/intersect.inl
 delete mode 100755 third_party/glm/gtx/io.hpp
 delete mode 100755 third_party/glm/gtx/io.inl
 delete mode 100755 third_party/glm/gtx/log_base.hpp
 delete mode 100755 third_party/glm/gtx/log_base.inl
 delete mode 100755 third_party/glm/gtx/matrix_cross_product.hpp
 delete mode 100755 third_party/glm/gtx/matrix_cross_product.inl
 delete mode 100755 third_party/glm/gtx/matrix_decompose.hpp
 delete mode 100755 third_party/glm/gtx/matrix_decompose.inl
 delete mode 100755 third_party/glm/gtx/matrix_factorisation.hpp
 delete mode 100755 third_party/glm/gtx/matrix_factorisation.inl
 delete mode 100755 third_party/glm/gtx/matrix_interpolation.hpp
 delete mode 100755 third_party/glm/gtx/matrix_interpolation.inl
 delete mode 100755 third_party/glm/gtx/matrix_major_storage.hpp
 delete mode 100755 third_party/glm/gtx/matrix_major_storage.inl
 delete mode 100755 third_party/glm/gtx/matrix_operation.hpp
 delete mode 100755 third_party/glm/gtx/matrix_operation.inl
 delete mode 100755 third_party/glm/gtx/matrix_query.hpp
 delete mode 100755 third_party/glm/gtx/matrix_query.inl
 delete mode 100755 third_party/glm/gtx/matrix_transform_2d.hpp
 delete mode 100755 third_party/glm/gtx/matrix_transform_2d.inl
 delete mode 100755 third_party/glm/gtx/mixed_product.hpp
 delete mode 100755 third_party/glm/gtx/mixed_product.inl
 delete mode 100755 third_party/glm/gtx/norm.hpp
 delete mode 100755 third_party/glm/gtx/norm.inl
 delete mode 100755 third_party/glm/gtx/normal.hpp
 delete mode 100755 third_party/glm/gtx/normal.inl
 delete mode 100755 third_party/glm/gtx/normalize_dot.hpp
 delete mode 100755 third_party/glm/gtx/normalize_dot.inl
 delete mode 100755 third_party/glm/gtx/number_precision.hpp
 delete mode 100755 third_party/glm/gtx/number_precision.inl
 delete mode 100755 third_party/glm/gtx/optimum_pow.hpp
 delete mode 100755 third_party/glm/gtx/optimum_pow.inl
 delete mode 100755 third_party/glm/gtx/orthonormalize.hpp
 delete mode 100755 third_party/glm/gtx/orthonormalize.inl
 delete mode 100755 third_party/glm/gtx/perpendicular.hpp
 delete mode 100755 third_party/glm/gtx/perpendicular.inl
 delete mode 100755 third_party/glm/gtx/polar_coordinates.hpp
 delete mode 100755 third_party/glm/gtx/polar_coordinates.inl
 delete mode 100755 third_party/glm/gtx/projection.hpp
 delete mode 100755 third_party/glm/gtx/projection.inl
 delete mode 100755 third_party/glm/gtx/quaternion.hpp
 delete mode 100755 third_party/glm/gtx/quaternion.inl
 delete mode 100755 third_party/glm/gtx/range.hpp
 delete mode 100755 third_party/glm/gtx/raw_data.hpp
 delete mode 100755 third_party/glm/gtx/raw_data.inl
 delete mode 100755 third_party/glm/gtx/rotate_normalized_axis.hpp
 delete mode 100755 third_party/glm/gtx/rotate_normalized_axis.inl
 delete mode 100755 third_party/glm/gtx/rotate_vector.hpp
 delete mode 100755 third_party/glm/gtx/rotate_vector.inl
 delete mode 100755 third_party/glm/gtx/scalar_multiplication.hpp
 delete mode 100755 third_party/glm/gtx/scalar_relational.hpp
 delete mode 100755 third_party/glm/gtx/scalar_relational.inl
 delete mode 100755 third_party/glm/gtx/spline.hpp
 delete mode 100755 third_party/glm/gtx/spline.inl
 delete mode 100755 third_party/glm/gtx/std_based_type.hpp
 delete mode 100755 third_party/glm/gtx/std_based_type.inl
 delete mode 100755 third_party/glm/gtx/string_cast.hpp
 delete mode 100755 third_party/glm/gtx/string_cast.inl
 delete mode 100755 third_party/glm/gtx/texture.hpp
 delete mode 100755 third_party/glm/gtx/texture.inl
 delete mode 100755 third_party/glm/gtx/transform.hpp
 delete mode 100755 third_party/glm/gtx/transform.inl
 delete mode 100755 third_party/glm/gtx/transform2.hpp
 delete mode 100755 third_party/glm/gtx/transform2.inl
 delete mode 100755 third_party/glm/gtx/type_aligned.hpp
 delete mode 100755 third_party/glm/gtx/type_aligned.inl
 delete mode 100755 third_party/glm/gtx/type_trait.hpp
 delete mode 100755 third_party/glm/gtx/type_trait.inl
 delete mode 100755 third_party/glm/gtx/vec_swizzle.hpp
 delete mode 100755 third_party/glm/gtx/vector_angle.hpp
 delete mode 100755 third_party/glm/gtx/vector_angle.inl
 delete mode 100755 third_party/glm/gtx/vector_query.hpp
 delete mode 100755 third_party/glm/gtx/vector_query.inl
 delete mode 100755 third_party/glm/gtx/wrap.hpp
 delete mode 100755 third_party/glm/gtx/wrap.inl
 delete mode 100755 third_party/glm/integer.hpp
 delete mode 100755 third_party/glm/mat2x2.hpp
 delete mode 100755 third_party/glm/mat2x3.hpp
 delete mode 100755 third_party/glm/mat2x4.hpp
 delete mode 100755 third_party/glm/mat3x2.hpp
 delete mode 100755 third_party/glm/mat3x3.hpp
 delete mode 100755 third_party/glm/mat3x4.hpp
 delete mode 100755 third_party/glm/mat4x2.hpp
 delete mode 100755 third_party/glm/mat4x3.hpp
 delete mode 100755 third_party/glm/mat4x4.hpp
 delete mode 100755 third_party/glm/matrix.hpp
 delete mode 100755 third_party/glm/packing.hpp
 delete mode 100755 third_party/glm/simd/common.h
 delete mode 100755 third_party/glm/simd/exponential.h
 delete mode 100755 third_party/glm/simd/geometric.h
 delete mode 100755 third_party/glm/simd/integer.h
 delete mode 100755 third_party/glm/simd/matrix.h
 delete mode 100755 third_party/glm/simd/neon.h
 delete mode 100755 third_party/glm/simd/packing.h
 delete mode 100755 third_party/glm/simd/platform.h
 delete mode 100755 third_party/glm/simd/trigonometric.h
 delete mode 100755 third_party/glm/simd/vector_relational.h
 delete mode 100755 third_party/glm/trigonometric.hpp
 delete mode 100755 third_party/glm/vec2.hpp
 delete mode 100755 third_party/glm/vec3.hpp
 delete mode 100755 third_party/glm/vec4.hpp
 delete mode 100755 third_party/glm/vector_relational.hpp
 create mode 100755 third_party/kvf.h
 delete mode 100644 third_party/volk.c
 delete mode 100644 third_party/volk.h

100755 third_party/glm/vec3.hpp delete mode 100755 third_party/glm/vec4.hpp delete mode 100755 third_party/glm/vector_relational.hpp create mode 100755 third_party/kvf.h delete mode 100644 third_party/volk.c delete mode 100644 third_party/volk.h diff --git a/Makefile b/Makefile index b0b4275..cb43f27 100644 --- a/Makefile +++ b/Makefile @@ -1,21 +1,12 @@ -# **************************************************************************** # -# # -# ::: :::::::: # -# Makefile :+: :+: :+: # -# +:+ +:+ +:+ # -# By: maldavid +#+ +:+ +#+ # -# +#+#+#+#+#+ +#+ # -# Created: 2022/10/04 16:43:41 by maldavid #+# #+# # -# Updated: 2024/07/05 13:34:03 by maldavid ### ########.fr # -# # -# **************************************************************************** # - NAME = libmlx.so SRCS = $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Core)) SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Platform)) +SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Graphics)) SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Renderer)) -SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Renderer/**)) +SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Renderer/Vulkan)) +SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Renderer/Pipelines)) +SRCS += $(wildcard $(addsuffix /*.cpp, ./runtime/Sources/Renderer/RenderPasses)) OBJ_DIR = objs/makefile OBJS = $(addprefix $(OBJ_DIR)/, $(SRCS:.cpp=.o)) @@ -37,7 +28,7 @@ MODE = "release" CXX = clang++ CXXFLAGS = -std=c++17 -O3 -fPIC -Wall -Wextra -Wno-deprecated -DSDL_MAIN_HANDLED -INCLUDES = -I./includes -I./runtime/Includes -I./third_party +INCLUDES = -I./includes -I./runtime/Includes -I./runtime/Sources -I./third_party LDLIBS = diff --git a/runtime/Includes/Core/Application.h b/runtime/Includes/Core/Application.h index fed04e2..ec74102 100644 --- a/runtime/Includes/Core/Application.h +++ b/runtime/Includes/Core/Application.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Application.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 21:49:46 by maldavid #+# #+# */ -/* Updated: 2024/07/05 14:04:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_APPLICATION__ #define __MLX_APPLICATION__ @@ -26,32 +14,32 @@ namespace mlx Application(); inline void GetMousePos(int* x, int* y) noexcept; - inline void MouseMove(void* win, int x, int y) noexcept; + inline void MouseMove(Handle win, int x, int y) noexcept; - inline void OnEvent(void* win, int event, int (*funct_ptr)(int, void*), void* param) noexcept; + inline void OnEvent(Handle win, int event, int (*funct_ptr)(int, void*), void* param) noexcept; - inline void GetScreenSize(void* win, int* w, int* h) noexcept; + inline void GetScreenSize(Handle win, int* w, int* h) noexcept; inline void SetFPSCap(std::uint32_t fps) noexcept; - inline void* NewGraphicsSuport(std::size_t w, std::size_t h, const char* title); - inline void ClearGraphicsSupport(void* win); - inline void DestroyGraphicsSupport(void* win); + inline Handle NewGraphicsSuport(std::size_t w, std::size_t h, const char* title); + inline void ClearGraphicsSupport(Handle win); + inline void DestroyGraphicsSupport(Handle win); - inline void PixelPut(void* win, int x, int y, std::uint32_t color) const noexcept; - inline void StringPut(void* win, int x, int y, std::uint32_t color, char* str); + inline void 
PixelPut(Handle win, int x, int y, std::uint32_t color) const noexcept;
+			inline void StringPut(Handle win, int x, int y, std::uint32_t color, char* str);

-			void* NewTexture(int w, int h);
-			void* NewStbTexture(char* file, int* w, int* h); // stb textures are image files (png, jpg, bmp, ...)
-			inline void TexturePut(void* win, void* img, int x, int y);
-			inline int GetTexturePixel(void* img, int x, int y);
-			inline void SetTexturePixel(void* img, int x, int y, std::uint32_t color);
-			void DestroyTexture(void* ptr);
+			Handle NewTexture(int w, int h);
+			Handle NewStbTexture(char* file, int* w, int* h); // stb textures are image files (png, jpg, bmp, ...)
+			inline void TexturePut(Handle win, Handle img, int x, int y);
+			inline int GetTexturePixel(Handle img, int x, int y);
+			inline void SetTexturePixel(Handle img, int x, int y, std::uint32_t color);
+			void DestroyTexture(Handle ptr);

 			inline void LoopHook(int (*f)(void*), void* param);
 			inline void LoopEnd() noexcept;

-			inline void LoadFont(void* win, const std::filesystem::path& filepath, float scale);
+			inline void LoadFont(Handle win, const std::filesystem::path& filepath, float scale);

 			void Run() noexcept;
@@ -62,8 +50,8 @@ namespace mlx
 			Inputs m_in;
 			ImageRegistry m_image_registry;
 			std::vector<std::unique_ptr<GraphicsSupport>> m_graphics;
-			std::function<int(void*)> f_loop_hook;
-			void* p_param = nullptr;
+			std::function<int(void*)> f_loop_hook;
+			Handle p_param = nullptr;
 	};
 }
diff --git a/runtime/Includes/Core/Application.inl b/runtime/Includes/Core/Application.inl
index cd95318..3193aee 100644
--- a/runtime/Includes/Core/Application.inl
+++ b/runtime/Includes/Core/Application.inl
@@ -1,15 +1,4 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* Application.inl :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2022/10/04 21:49:46 by maldavid #+# #+# */
-/* Updated: 2024/04/23 14:45:07 by maldavid ### ########.fr */
-/* */
-/* ************************************************************************** */
-
+#pragma once
 #include

 #define CHECK_WINDOW_PTR(win) \
diff --git a/runtime/Includes/Core/Enums.h b/runtime/Includes/Core/Enums.h
index 68f036b..082373e 100644
--- a/runtime/Includes/Core/Enums.h
+++ b/runtime/Includes/Core/Enums.h
@@ -1,15 +1,3 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* Enums.h :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2024/03/27 17:15:24 by maldavid #+# #+# */
-/* Updated: 2024/07/05 13:23:10 by maldavid ### ########.fr */
-/* */
-/* ************************************************************************** */
-
 #ifndef __MLX_CORE_ENUMS__
 #define __MLX_CORE_ENUMS__
@@ -27,8 +15,18 @@ namespace mlx
 		EndEnum
 	};
-	constexpr std::size_t LogTypeCount = static_cast<std::size_t>(LogType::EndEnum);
+
+	enum class Event
+	{
+		ResizeEventCode = 56,
+		FrameBeginEventCode = 57,
+		FatalErrorEventCode = 168,
+		QuitEventCode = 168,
+
+		EndEnum
+	};
+	constexpr std::size_t EventCount = static_cast<std::size_t>(Event::EndEnum);
 }

 #endif
diff --git a/runtime/Includes/Core/EventBase.h b/runtime/Includes/Core/EventBase.h
index 37b5944..7f4464a 100644
--- a/runtime/Includes/Core/EventBase.h
+++ b/runtime/Includes/Core/EventBase.h
@@ -1,15 +1,3 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* EventBase.h :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 
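
The new Event codes are sparse (56, 57, 168), and FatalErrorEventCode and QuitEventCode share the value 168 in this revision, so the two cannot be told apart once converted to an index. A minimal dispatch-table sketch sized with EventCount, reusing the int(int, void*) callback shape that Application::OnEvent already takes; the type and function names are illustrative only:

#include <array>
#include <cstddef>

using EventCallback = int (*)(int, void*);

struct EventDispatch
{
	// EventCount == static_cast<std::size_t>(Event::EndEnum) == 169 here, so
	// the sparse codes 56, 57 and 168 all index safely into the array.
	std::array<EventCallback, mlx::EventCount> slots{};

	int Fire(mlx::Event event, int data, void* param) const
	{
		const std::size_t index = static_cast<std::size_t>(event);
		if(index < slots.size() && slots[index] != nullptr)
			return slots[index](data, param);
		return 0;
	}
};
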
2024/03/27 17:27:22 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:31:16 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_BASE_EVENT__ #define __MLX_BASE_EVENT__ diff --git a/runtime/Includes/Core/EventBus.h b/runtime/Includes/Core/EventBus.h index db110d3..4b51f15 100644 --- a/runtime/Includes/Core/EventBus.h +++ b/runtime/Includes/Core/EventBus.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* EventBus.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:30:36 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:31:41 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_EVENT_BUS__ #define __MLX_EVENT_BUS__ diff --git a/runtime/Includes/Core/EventListener.h b/runtime/Includes/Core/EventListener.h index d6c002c..4907aa4 100644 --- a/runtime/Includes/Core/EventListener.h +++ b/runtime/Includes/Core/EventListener.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* EventListener.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:28:17 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:37:53 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_EVENT_LISTENER__ #define __MLX_EVENT_LISTENER__ diff --git a/runtime/Includes/Core/Format.h b/runtime/Includes/Core/Format.h index d55b824..e948709 100644 --- a/runtime/Includes/Core/Format.h +++ b/runtime/Includes/Core/Format.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Format.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:11:09 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:12:03 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_FORMAT__ #define __MLX_FORMAT__ diff --git a/runtime/Includes/Core/Format.inl b/runtime/Includes/Core/Format.inl index e6d94d2..3bc490a 100644 --- a/runtime/Includes/Core/Format.inl +++ b/runtime/Includes/Core/Format.inl @@ -1,15 +1,4 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Format.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:11:09 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:12:03 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - +#pragma once #include #include #include diff --git a/runtime/Includes/Core/Fps.h b/runtime/Includes/Core/Fps.h index 0803bed..6cebc77 100644 --- a/runtime/Includes/Core/Fps.h +++ b/runtime/Includes/Core/Fps.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Fps.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 14:53:30 by maldavid #+# #+# */ -/* Updated: 2024/03/27 20:52:06 by maldavid ### ########.fr */ -/* */ -/* 
************************************************************************** */ - #ifndef __MLX_FPS__ #define __MLX_FPS__ diff --git a/runtime/Includes/Core/Graphics.h b/runtime/Includes/Core/Graphics.h index 8ab5bbf..842c2c9 100644 --- a/runtime/Includes/Core/Graphics.h +++ b/runtime/Includes/Core/Graphics.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Graphics.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 14:49:49 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:46:58 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_GRAPHICS__ #define __MLX_GRAPHICS__ diff --git a/runtime/Includes/Core/Graphics.inl b/runtime/Includes/Core/Graphics.inl index 50573a1..10286cc 100644 --- a/runtime/Includes/Core/Graphics.inl +++ b/runtime/Includes/Core/Graphics.inl @@ -1,15 +1,4 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* graphics.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 15:13:55 by maldavid #+# #+# */ -/* Updated: 2023/04/02 15:26:16 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - +#pragma once #include namespace mlx diff --git a/runtime/Includes/Core/ImagesRegistry.h b/runtime/Includes/Core/ImagesRegistry.h index e2bd494..01c471b 100644 --- a/runtime/Includes/Core/ImagesRegistry.h +++ b/runtime/Includes/Core/ImagesRegistry.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* ImagesRegistry.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/04/03 15:11:47 by maldavid #+# #+# */ -/* Updated: 2024/04/21 20:31:00 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_CORE_IMAGES_REGISTRY__ #define __MLX_CORE_IMAGES_REGISTRY__ diff --git a/runtime/Includes/Core/ImagesRegistry.inl b/runtime/Includes/Core/ImagesRegistry.inl index 2a69334..c7a2909 100644 --- a/runtime/Includes/Core/ImagesRegistry.inl +++ b/runtime/Includes/Core/ImagesRegistry.inl @@ -1,8 +1,3 @@ -// This file is a part of Akel -// Authors : @kbz_8 -// Created : 21/04/2024 -// Updated : 21/04/2024 - #pragma once #include diff --git a/runtime/Includes/Core/Logs.h b/runtime/Includes/Core/Logs.h index edd816b..906f969 100644 --- a/runtime/Includes/Core/Logs.h +++ b/runtime/Includes/Core/Logs.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Logs.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:14:10 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:19:23 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_LOGS__ #define __MLX_LOGS__ diff --git a/runtime/Includes/Core/Logs.inl b/runtime/Includes/Core/Logs.inl index 2c33f55..744fbaa 100644 --- a/runtime/Includes/Core/Logs.inl +++ b/runtime/Includes/Core/Logs.inl @@ -1,15 +1,4 @@ -/* **************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Logs.inl :+: 
:+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2024/03/27 17:19:47 by maldavid #+# #+# */
-/* Updated: 2024/03/27 17:19:47 by maldavid ### ########.fr */
-/* */
-/* **************************************************************************** */
-
+#pragma once
 #include
 #include
diff --git a/runtime/Includes/Core/Memory.h b/runtime/Includes/Core/Memory.h
index db11105..4a51eb5 100644
--- a/runtime/Includes/Core/Memory.h
+++ b/runtime/Includes/Core/Memory.h
@@ -1,15 +1,3 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* Memory.h :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2023/12/07 16:31:51 by kbz_8 #+# #+# */
-/* Updated: 2024/03/27 21:16:44 by maldavid ### ########.fr */
-/* */
-/* ************************************************************************** */
-
 #ifndef __MLX_MEMORY__
 #define __MLX_MEMORY__
diff --git a/runtime/Includes/Core/Profiler.h b/runtime/Includes/Core/Profiler.h
index 9a24f42..ec977bc 100644
--- a/runtime/Includes/Core/Profiler.h
+++ b/runtime/Includes/Core/Profiler.h
@@ -1,15 +1,3 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* Profiler.h :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2024/01/10 13:35:45 by maldavid #+# #+# */
-/* Updated: 2024/07/05 13:24:17 by maldavid ### ########.fr */
-/* */
-/* ************************************************************************** */
-
 #ifndef __MLX_PROFILER__
 #define __MLX_PROFILER__
diff --git a/runtime/Includes/Core/SDLManager.h b/runtime/Includes/Core/SDLManager.h
index 8ad7ea7..5fcac81 100644
--- a/runtime/Includes/Core/SDLManager.h
+++ b/runtime/Includes/Core/SDLManager.h
@@ -1,19 +1,7 @@
-/* ************************************************************************** */
-/* */
-/* ::: :::::::: */
-/* SDLManager.h :+: :+: :+: */
-/* +:+ +:+ +:+ */
-/* By: maldavid +#+ +:+ +#+ */
-/* +#+#+#+#+#+ +#+ */
-/* Created: 2024/05/25 15:28:59 by maldavid #+# #+# */
-/* Updated: 2024/07/05 22:15:22 by maldavid ### ########.fr */
-/* */
-/* ************************************************************************** */
-
 #ifndef __MLX_SDL_MANAGER__
 #define __MLX_SDL_MANAGER__
-#include
+#include

 namespace mlx
 {
@@ -25,17 +13,27 @@ namespace mlx
 			void Init() noexcept;
 			void Shutdown() noexcept;
-			void* CreateWindow(const std::string& title, std::size_t w, std::size_t h);
-			void DestroyWindow(void* window) noexcept;
+			Handle CreateWindow(const std::string& title, std::size_t w, std::size_t h, bool hidden);
+			void DestroyWindow(Handle window) noexcept;
-			void SetEventCallback();
+			VkSurfaceKHR CreateVulkanSurface(Handle window, VkInstance instance) const noexcept;
+			std::vector<const char*> GetRequiredVulkanInstanceExtentions(Handle window) const noexcept;
+			Vec2ui GetVulkanDrawableSize(Handle window) const noexcept;
+
+			inline void SetEventCallback(func::function functor, void* userdata) { f_callback = std::move(functor); p_callback_data = userdata; }
 		private:
 			SDLManager() = default;
 			~SDLManager() = default;
 		private:
-			std::unordered_set<void*> m_windows_registry;
+			std::unordered_set<Handle> m_windows_registry;
+			func::function f_callback;
+			void* p_callback_data = nullptr;
+			std::int32_t m_x;
+			std::int32_t m_y;
+			std::int32_t m_rel_x;
+			std::int32_t m_rel_y;
 			bool m_drop_sdl_responsability = false;
 	};
 }
diff --git a/runtime/Includes/Core/UUID.h 
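
The template arguments of func::function were lost in this copy of the patch, so the exact callback signature is not recoverable here; the sketch below assumes a void(Event, void*) shape purely for illustration:

// Hypothetical wiring of the new SDLManager event callback; only
// SetEventCallback(functor, userdata) itself comes from the diff above.
static bool g_should_quit = false; // illustrative flag, not part of the patch

void HookSdlEvents(mlx::SDLManager& manager)
{
	manager.SetEventCallback([](mlx::Event event, void* userdata)
	{
		if(event == mlx::Event::QuitEventCode)
			*static_cast<bool*>(userdata) = true; // let the main loop exit
	}, &g_should_quit);
}
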
b/runtime/Includes/Core/UUID.h index 0b4075f..9911302 100644 --- a/runtime/Includes/Core/UUID.h +++ b/runtime/Includes/Core/UUID.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UUID.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/06 11:13:23 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:19:18 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_UUID__ #define __MLX_UUID__ diff --git a/runtime/Includes/Embedded/2DFragment.nzsl b/runtime/Includes/Embedded/2DFragment.nzsl new file mode 100644 index 0000000..a972887 --- /dev/null +++ b/runtime/Includes/Embedded/2DFragment.nzsl @@ -0,0 +1,28 @@ +[nzsl_version("1.0")] +module; + +struct VertOut +{ + [location(0)] color: vec4[f32], + [location(1)] uv: vec2[f32] +} + +struct FragOut +{ + [location(0)] color: vec4[f32] +} + +external +{ + [set(1), binding(0)] u_texture: sampler2D[f32] +} + +[entry(frag)] +fn main(input: VertOut) -> FragOut +{ + let output: FragOut; + output.color = input.color * u_texture.Sample(input.uv); + if(output.color.w == 0.0) + discard; + return output; +} diff --git a/runtime/Includes/Embedded/2DFragment.spv.h b/runtime/Includes/Embedded/2DFragment.spv.h new file mode 100644 index 0000000..5792158 --- /dev/null +++ b/runtime/Includes/Embedded/2DFragment.spv.h @@ -0,0 +1,44 @@ +3,2,35,7,0,0,1,0,39,0,0,0,51,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,8,0,4,0,0,0,28,0,0,0,109,97,105,110,0,0,0,0, +10,0,0,0,16,0,0,0,22,0,0,0,16,0,3,0,28,0,0,0,7,0,0,0,3,0,3,0,0,0, +0,0,100,0,0,0,5,0,4,0,19,0,0,0,86,101,114,116,79,117,116,0,6,0,5,0,19,0,0,0, +0,0,0,0,99,111,108,111,114,0,0,0,6,0,4,0,19,0,0,0,1,0,0,0,117,118,0,0,5,0, +4,0,23,0,0,0,70,114,97,103,79,117,116,0,6,0,5,0,23,0,0,0,0,0,0,0,99,111,108,111, +114,0,0,0,5,0,5,0,5,0,0,0,117,95,116,101,120,116,117,114,101,0,0,0,5,0,4,0,10,0, +0,0,99,111,108,111,114,0,0,0,5,0,3,0,16,0,0,0,117,118,0,0,5,0,4,0,22,0,0,0, +99,111,108,111,114,0,0,0,5,0,4,0,28,0,0,0,109,97,105,110,0,0,0,0,71,0,4,0,5,0, +0,0,33,0,0,0,0,0,0,0,71,0,4,0,5,0,0,0,34,0,0,0,1,0,0,0,71,0,4,0, +10,0,0,0,30,0,0,0,0,0,0,0,71,0,4,0,16,0,0,0,30,0,0,0,1,0,0,0,71,0, +4,0,22,0,0,0,30,0,0,0,0,0,0,0,72,0,5,0,19,0,0,0,0,0,0,0,35,0,0,0, +0,0,0,0,72,0,5,0,19,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,23,0, +0,0,0,0,0,0,35,0,0,0,0,0,0,0,22,0,3,0,1,0,0,0,32,0,0,0,25,0,9,0, +2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, +0,0,27,0,3,0,3,0,0,0,2,0,0,0,32,0,4,0,4,0,0,0,0,0,0,0,3,0,0,0, +19,0,2,0,6,0,0,0,33,0,3,0,7,0,0,0,6,0,0,0,23,0,4,0,8,0,0,0,1,0, +0,0,4,0,0,0,32,0,4,0,9,0,0,0,1,0,0,0,8,0,0,0,21,0,4,0,11,0,0,0, +32,0,0,0,1,0,0,0,43,0,4,0,11,0,0,0,12,0,0,0,0,0,0,0,32,0,4,0,13,0, +0,0,7,0,0,0,8,0,0,0,23,0,4,0,14,0,0,0,1,0,0,0,2,0,0,0,32,0,4,0, +15,0,0,0,1,0,0,0,14,0,0,0,43,0,4,0,11,0,0,0,17,0,0,0,1,0,0,0,32,0, +4,0,18,0,0,0,7,0,0,0,14,0,0,0,30,0,4,0,19,0,0,0,8,0,0,0,14,0,0,0, +32,0,4,0,20,0,0,0,7,0,0,0,19,0,0,0,32,0,4,0,21,0,0,0,3,0,0,0,8,0, +0,0,30,0,3,0,23,0,0,0,8,0,0,0,32,0,4,0,24,0,0,0,7,0,0,0,23,0,0,0, +43,0,4,0,11,0,0,0,25,0,0,0,3,0,0,0,43,0,4,0,1,0,0,0,26,0,0,0,0,0, +0,0,20,0,2,0,27,0,0,0,59,0,4,0,4,0,0,0,5,0,0,0,0,0,0,0,59,0,4,0, +9,0,0,0,10,0,0,0,1,0,0,0,59,0,4,0,15,0,0,0,16,0,0,0,1,0,0,0,59,0, +4,0,21,0,0,0,22,0,0,0,3,0,0,0,54,0,5,0,6,0,0,0,28,0,0,0,0,0,0,0, +7,0,0,0,248,0,2,0,29,0,0,0,59,0,4,0,24,0,0,0,30,0,0,0,7,0,0,0,59,0, 
+4,0,20,0,0,0,31,0,0,0,7,0,0,0,65,0,5,0,13,0,0,0,32,0,0,0,31,0,0,0, +12,0,0,0,63,0,3,0,32,0,0,0,10,0,0,0,65,0,5,0,18,0,0,0,33,0,0,0,31,0, +0,0,17,0,0,0,63,0,3,0,33,0,0,0,16,0,0,0,65,0,5,0,13,0,0,0,34,0,0,0, +31,0,0,0,12,0,0,0,61,0,4,0,8,0,0,0,35,0,0,0,34,0,0,0,61,0,4,0,3,0, +0,0,36,0,0,0,5,0,0,0,65,0,5,0,18,0,0,0,37,0,0,0,31,0,0,0,17,0,0,0, +61,0,4,0,14,0,0,0,38,0,0,0,37,0,0,0,87,0,5,0,8,0,0,0,39,0,0,0,36,0, +0,0,38,0,0,0,133,0,5,0,8,0,0,0,40,0,0,0,35,0,0,0,39,0,0,0,65,0,5,0, +13,0,0,0,41,0,0,0,30,0,0,0,12,0,0,0,62,0,3,0,41,0,0,0,40,0,0,0,65,0, +5,0,13,0,0,0,45,0,0,0,30,0,0,0,12,0,0,0,61,0,4,0,8,0,0,0,46,0,0,0, +45,0,0,0,81,0,5,0,1,0,0,0,47,0,0,0,46,0,0,0,3,0,0,0,180,0,5,0,27,0, +0,0,48,0,0,0,47,0,0,0,26,0,0,0,247,0,3,0,42,0,0,0,0,0,0,0,250,0,4,0, +48,0,0,0,43,0,0,0,44,0,0,0,248,0,2,0,43,0,0,0,252,0,1,0,248,0,2,0,44,0, +0,0,249,0,2,0,42,0,0,0,248,0,2,0,42,0,0,0,61,0,4,0,23,0,0,0,49,0,0,0, +30,0,0,0,81,0,5,0,8,0,0,0,50,0,0,0,49,0,0,0,0,0,0,0,62,0,3,0,22,0, +0,0,50,0,0,0,253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Embedded/2DVertex.nzsl b/runtime/Includes/Embedded/2DVertex.nzsl new file mode 100644 index 0000000..ec65918 --- /dev/null +++ b/runtime/Includes/Embedded/2DVertex.nzsl @@ -0,0 +1,45 @@ +[nzsl_version("1.0")] +module; + +struct VertIn +{ + [location(0)] pos: vec4[f32], + [location(1)] color: vec4[f32], // unused + [location(2)] normal: vec4[f32], // unused + [location(3)] uv: vec2[f32] +} + +struct VertOut +{ + [location(0)] color: vec4[f32], + [location(1)] uv: vec2[f32], + [builtin(position)] pos: vec4[f32] +} + +struct ViewerData +{ + projection_matrix: mat4[f32] +} + +struct SpriteData +{ + color: vec4[f32], + position: vec2[f32] +} + +external +{ + [set(0), binding(0)] viewer_data: uniform[ViewerData], + model : push_constant[SpriteData] +} + +[entry(vert)] +fn main(input: VertIn) -> VertOut +{ + input.uv.x *= -1.0; + let output: VertOut; + output.uv = input.uv; + output.color = model.color; + output.pos = viewer_data.projection_matrix * vec4[f32](input.pos.xy + model.position, 0.0, 1.0); + return output; +} diff --git a/runtime/Includes/Embedded/2DVertex.spv.h b/runtime/Includes/Embedded/2DVertex.spv.h new file mode 100644 index 0000000..94cf250 --- /dev/null +++ b/runtime/Includes/Embedded/2DVertex.spv.h @@ -0,0 +1,80 @@ +3,2,35,7,0,0,1,0,39,0,0,0,77,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,12,0,0,0,0,0,37,0,0,0,109,97,105,110,0,0,0,0, +14,0,0,0,18,0,0,0,20,0,0,0,23,0,0,0,29,0,0,0,31,0,0,0,32,0,0,0,3,0, +3,0,0,0,0,0,100,0,0,0,5,0,5,0,4,0,0,0,86,105,101,119,101,114,68,97,116,97,0,0, +6,0,8,0,4,0,0,0,0,0,0,0,112,114,111,106,101,99,116,105,111,110,95,109,97,116,114,105,120,0, +0,0,5,0,5,0,8,0,0,0,83,112,114,105,116,101,68,97,116,97,0,0,6,0,5,0,8,0,0,0, +0,0,0,0,99,111,108,111,114,0,0,0,6,0,6,0,8,0,0,0,1,0,0,0,112,111,115,105,116,105, +111,110,0,0,0,0,5,0,4,0,26,0,0,0,86,101,114,116,73,110,0,0,6,0,4,0,26,0,0,0, +0,0,0,0,112,111,115,0,6,0,5,0,26,0,0,0,1,0,0,0,99,111,108,111,114,0,0,0,6,0, +5,0,26,0,0,0,2,0,0,0,110,111,114,109,97,108,0,0,6,0,4,0,26,0,0,0,3,0,0,0, +117,118,0,0,5,0,4,0,33,0,0,0,86,101,114,116,79,117,116,0,6,0,5,0,33,0,0,0,0,0, +0,0,99,111,108,111,114,0,0,0,6,0,4,0,33,0,0,0,1,0,0,0,117,118,0,0,6,0,4,0, +33,0,0,0,2,0,0,0,112,111,115,0,5,0,5,0,6,0,0,0,118,105,101,119,101,114,95,100,97,116, +97,0,5,0,4,0,10,0,0,0,109,111,100,101,108,0,0,0,5,0,3,0,14,0,0,0,112,111,115,0, +5,0,4,0,18,0,0,0,99,111,108,111,114,0,0,0,5,0,4,0,20,0,0,0,110,111,114,109,97,108, +0,0,5,0,3,0,23,0,0,0,117,118,0,0,5,0,4,0,29,0,0,0,99,111,108,111,114,0,0,0, 
+5,0,3,0,31,0,0,0,117,118,0,0,5,0,5,0,32,0,0,0,112,111,115,105,116,105,111,110,0,0, +0,0,5,0,4,0,37,0,0,0,109,97,105,110,0,0,0,0,71,0,4,0,6,0,0,0,33,0,0,0, +0,0,0,0,71,0,4,0,6,0,0,0,34,0,0,0,0,0,0,0,71,0,4,0,32,0,0,0,11,0, +0,0,0,0,0,0,71,0,4,0,14,0,0,0,30,0,0,0,0,0,0,0,71,0,4,0,18,0,0,0, +30,0,0,0,1,0,0,0,71,0,4,0,20,0,0,0,30,0,0,0,2,0,0,0,71,0,4,0,23,0, +0,0,30,0,0,0,3,0,0,0,71,0,4,0,29,0,0,0,30,0,0,0,0,0,0,0,71,0,4,0, +31,0,0,0,30,0,0,0,1,0,0,0,71,0,3,0,4,0,0,0,2,0,0,0,72,0,4,0,4,0, +0,0,0,0,0,0,5,0,0,0,72,0,5,0,4,0,0,0,0,0,0,0,7,0,0,0,16,0,0,0, +72,0,5,0,4,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,71,0,3,0,8,0,0,0,2,0, +0,0,72,0,5,0,8,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,72,0,5,0,8,0,0,0, +1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,26,0,0,0,0,0,0,0,35,0,0,0,0,0, +0,0,72,0,5,0,26,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,26,0,0,0, +2,0,0,0,35,0,0,0,32,0,0,0,72,0,5,0,26,0,0,0,3,0,0,0,35,0,0,0,48,0, +0,0,72,0,5,0,33,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,72,0,5,0,33,0,0,0, +1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,33,0,0,0,2,0,0,0,35,0,0,0,32,0, +0,0,22,0,3,0,1,0,0,0,32,0,0,0,23,0,4,0,2,0,0,0,1,0,0,0,4,0,0,0, +24,0,4,0,3,0,0,0,2,0,0,0,4,0,0,0,30,0,3,0,4,0,0,0,3,0,0,0,32,0, +4,0,5,0,0,0,2,0,0,0,4,0,0,0,23,0,4,0,7,0,0,0,1,0,0,0,2,0,0,0, +30,0,4,0,8,0,0,0,2,0,0,0,7,0,0,0,32,0,4,0,9,0,0,0,9,0,0,0,8,0, +0,0,19,0,2,0,11,0,0,0,33,0,3,0,12,0,0,0,11,0,0,0,32,0,4,0,13,0,0,0, +1,0,0,0,2,0,0,0,21,0,4,0,15,0,0,0,32,0,0,0,1,0,0,0,43,0,4,0,15,0, +0,0,16,0,0,0,0,0,0,0,32,0,4,0,17,0,0,0,7,0,0,0,2,0,0,0,43,0,4,0, +15,0,0,0,19,0,0,0,1,0,0,0,43,0,4,0,15,0,0,0,21,0,0,0,2,0,0,0,32,0, +4,0,22,0,0,0,1,0,0,0,7,0,0,0,43,0,4,0,15,0,0,0,24,0,0,0,3,0,0,0, +32,0,4,0,25,0,0,0,7,0,0,0,7,0,0,0,30,0,6,0,26,0,0,0,2,0,0,0,2,0, +0,0,2,0,0,0,7,0,0,0,32,0,4,0,27,0,0,0,7,0,0,0,26,0,0,0,32,0,4,0, +28,0,0,0,3,0,0,0,2,0,0,0,32,0,4,0,30,0,0,0,3,0,0,0,7,0,0,0,30,0, +5,0,33,0,0,0,2,0,0,0,7,0,0,0,2,0,0,0,43,0,4,0,1,0,0,0,34,0,0,0, +0,0,128,63,32,0,4,0,35,0,0,0,7,0,0,0,33,0,0,0,43,0,4,0,1,0,0,0,36,0, +0,0,0,0,0,0,32,0,4,0,51,0,0,0,7,0,0,0,1,0,0,0,32,0,4,0,56,0,0,0, +9,0,0,0,2,0,0,0,32,0,4,0,60,0,0,0,2,0,0,0,3,0,0,0,32,0,4,0,66,0, +0,0,9,0,0,0,7,0,0,0,59,0,4,0,5,0,0,0,6,0,0,0,2,0,0,0,59,0,4,0, +9,0,0,0,10,0,0,0,9,0,0,0,59,0,4,0,13,0,0,0,14,0,0,0,1,0,0,0,59,0, +4,0,13,0,0,0,18,0,0,0,1,0,0,0,59,0,4,0,13,0,0,0,20,0,0,0,1,0,0,0, +59,0,4,0,22,0,0,0,23,0,0,0,1,0,0,0,59,0,4,0,28,0,0,0,29,0,0,0,3,0, +0,0,59,0,4,0,30,0,0,0,31,0,0,0,3,0,0,0,59,0,4,0,28,0,0,0,32,0,0,0, +3,0,0,0,54,0,5,0,11,0,0,0,37,0,0,0,0,0,0,0,12,0,0,0,248,0,2,0,38,0, +0,0,59,0,4,0,35,0,0,0,39,0,0,0,7,0,0,0,59,0,4,0,27,0,0,0,40,0,0,0, +7,0,0,0,65,0,5,0,17,0,0,0,41,0,0,0,40,0,0,0,16,0,0,0,63,0,3,0,41,0, +0,0,14,0,0,0,65,0,5,0,17,0,0,0,42,0,0,0,40,0,0,0,19,0,0,0,63,0,3,0, +42,0,0,0,18,0,0,0,65,0,5,0,17,0,0,0,43,0,0,0,40,0,0,0,21,0,0,0,63,0, +3,0,43,0,0,0,20,0,0,0,65,0,5,0,25,0,0,0,44,0,0,0,40,0,0,0,24,0,0,0, +63,0,3,0,44,0,0,0,23,0,0,0,65,0,5,0,25,0,0,0,45,0,0,0,40,0,0,0,24,0, +0,0,61,0,4,0,7,0,0,0,46,0,0,0,45,0,0,0,81,0,5,0,1,0,0,0,47,0,0,0, +46,0,0,0,0,0,0,0,127,0,4,0,1,0,0,0,48,0,0,0,34,0,0,0,133,0,5,0,1,0, +0,0,49,0,0,0,47,0,0,0,48,0,0,0,65,0,5,0,25,0,0,0,50,0,0,0,40,0,0,0, +24,0,0,0,65,0,5,0,51,0,0,0,52,0,0,0,50,0,0,0,16,0,0,0,62,0,3,0,52,0, +0,0,49,0,0,0,65,0,5,0,25,0,0,0,53,0,0,0,40,0,0,0,24,0,0,0,61,0,4,0, +7,0,0,0,54,0,0,0,53,0,0,0,65,0,5,0,25,0,0,0,55,0,0,0,39,0,0,0,19,0, +0,0,62,0,3,0,55,0,0,0,54,0,0,0,65,0,5,0,56,0,0,0,57,0,0,0,10,0,0,0, +16,0,0,0,61,0,4,0,2,0,0,0,58,0,0,0,57,0,0,0,65,0,5,0,17,0,0,0,59,0, +0,0,39,0,0,0,16,0,0,0,62,0,3,0,59,0,0,0,58,0,0,0,65,0,5,0,60,0,0,0, 
+61,0,0,0,6,0,0,0,16,0,0,0,61,0,4,0,3,0,0,0,62,0,0,0,61,0,0,0,65,0, +5,0,17,0,0,0,63,0,0,0,40,0,0,0,16,0,0,0,61,0,4,0,2,0,0,0,64,0,0,0, +63,0,0,0,79,0,7,0,7,0,0,0,65,0,0,0,64,0,0,0,64,0,0,0,0,0,0,0,1,0, +0,0,65,0,5,0,66,0,0,0,67,0,0,0,10,0,0,0,19,0,0,0,61,0,4,0,7,0,0,0, +68,0,0,0,67,0,0,0,129,0,5,0,7,0,0,0,69,0,0,0,65,0,0,0,68,0,0,0,80,0, +6,0,2,0,0,0,70,0,0,0,69,0,0,0,36,0,0,0,34,0,0,0,145,0,5,0,2,0,0,0, +71,0,0,0,62,0,0,0,70,0,0,0,65,0,5,0,17,0,0,0,72,0,0,0,39,0,0,0,21,0, +0,0,62,0,3,0,72,0,0,0,71,0,0,0,61,0,4,0,33,0,0,0,73,0,0,0,39,0,0,0, +81,0,5,0,2,0,0,0,74,0,0,0,73,0,0,0,0,0,0,0,62,0,3,0,29,0,0,0,74,0, +0,0,81,0,5,0,7,0,0,0,75,0,0,0,73,0,0,0,1,0,0,0,62,0,3,0,31,0,0,0, +75,0,0,0,81,0,5,0,2,0,0,0,76,0,0,0,73,0,0,0,2,0,0,0,62,0,3,0,32,0, +0,0,76,0,0,0,253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Utils/DogicaTTF.h b/runtime/Includes/Embedded/DogicaTTF.h similarity index 99% rename from runtime/Includes/Utils/DogicaTTF.h rename to runtime/Includes/Embedded/DogicaTTF.h index 3d24ce9..f624ff2 100644 --- a/runtime/Includes/Utils/DogicaTTF.h +++ b/runtime/Includes/Embedded/DogicaTTF.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DogicaTTF.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/11 16:20:25 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:59:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_DOGICA_TTF__ #define __MLX_DOGICA_TTF__ diff --git a/runtime/Includes/Utils/IconMlx.h b/runtime/Includes/Embedded/IconMlx.h similarity index 99% rename from runtime/Includes/Utils/IconMlx.h rename to runtime/Includes/Embedded/IconMlx.h index 4a8e5a3..b74d1ea 100644 --- a/runtime/Includes/Utils/IconMlx.h +++ b/runtime/Includes/Embedded/IconMlx.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* IconMlx.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/11/25 11:23:16 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:59:45 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __ICON_MLX__ #define __ICON_MLX__ diff --git a/runtime/Includes/Embedded/ScreenFragment.nzsl b/runtime/Includes/Embedded/ScreenFragment.nzsl new file mode 100644 index 0000000..562aca0 --- /dev/null +++ b/runtime/Includes/Embedded/ScreenFragment.nzsl @@ -0,0 +1,46 @@ +[nzsl_version("1.0")] +module; + +struct VertOut +{ + [location(0)] uv : vec2[f32] +} + +struct FragOut +{ + [location(0)] color: vec4[f32] +} + +external +{ + [set(0), binding(0)] u_texture: sampler2D[f32] +} + +option approximates_rgb: bool = false; + +fn LinearTosRGB(color: vec3[f32]) -> vec3[f32] +{ + const if(!approximates_rgb) + { + return select( + color > (0.0031308).rrr, + 1.055 * pow(color, (1.0 / 2.4).rrr) - (0.055).rrr, + 12.92 * color + ); + } + else + return pow(color, (1.0 / 2.2).rrr); +} + +option gamma_correction: bool = false; + +[entry(frag)] +fn main(input: VertOut) -> FragOut +{ + let output: FragOut; + const if(gamma_correction) + output.color = vec4[f32](LinearTosRGB(u_texture.Sample(input.uv).xyz), 1.0); + else + output.color = u_texture.Sample(input.uv); + return output; +} diff --git a/runtime/Includes/Embedded/ScreenFragment.spv.h b/runtime/Includes/Embedded/ScreenFragment.spv.h new file 
mode 100644 index 0000000..ed5f334 --- /dev/null +++ b/runtime/Includes/Embedded/ScreenFragment.spv.h @@ -0,0 +1,49 @@ +3,2,35,7,0,0,1,0,39,0,0,0,62,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,11,0, +6,0,32,0,0,0,71,76,83,76,46,115,116,100,46,52,53,48,0,0,0,0,14,0,3,0,0,0,0,0, +1,0,0,0,15,0,7,0,4,0,0,0,34,0,0,0,109,97,105,110,0,0,0,0,23,0,0,0,29,0, +0,0,16,0,3,0,34,0,0,0,7,0,0,0,3,0,3,0,0,0,0,0,100,0,0,0,5,0,4,0, +25,0,0,0,86,101,114,116,79,117,116,0,6,0,4,0,25,0,0,0,0,0,0,0,117,118,0,0,5,0, +4,0,30,0,0,0,70,114,97,103,79,117,116,0,6,0,5,0,30,0,0,0,0,0,0,0,99,111,108,111, +114,0,0,0,5,0,5,0,5,0,0,0,117,95,116,101,120,116,117,114,101,0,0,0,5,0,3,0,23,0, +0,0,117,118,0,0,5,0,4,0,29,0,0,0,99,111,108,111,114,0,0,0,5,0,6,0,33,0,0,0, +76,105,110,101,97,114,84,111,115,82,71,66,0,0,0,0,5,0,4,0,34,0,0,0,109,97,105,110,0,0, +0,0,71,0,4,0,5,0,0,0,33,0,0,0,0,0,0,0,71,0,4,0,5,0,0,0,34,0,0,0, +0,0,0,0,71,0,4,0,23,0,0,0,30,0,0,0,0,0,0,0,71,0,4,0,29,0,0,0,30,0, +0,0,0,0,0,0,72,0,5,0,25,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,72,0,5,0, +30,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,22,0,3,0,1,0,0,0,32,0,0,0,25,0, +9,0,2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +0,0,0,0,27,0,3,0,3,0,0,0,2,0,0,0,32,0,4,0,4,0,0,0,0,0,0,0,3,0, +0,0,23,0,4,0,6,0,0,0,1,0,0,0,3,0,0,0,32,0,4,0,7,0,0,0,7,0,0,0, +6,0,0,0,33,0,4,0,8,0,0,0,6,0,0,0,7,0,0,0,43,0,4,0,1,0,0,0,9,0, +0,0,28,46,77,59,21,0,4,0,10,0,0,0,32,0,0,0,1,0,0,0,43,0,4,0,10,0,0,0, +11,0,0,0,0,0,0,0,20,0,2,0,12,0,0,0,23,0,4,0,13,0,0,0,12,0,0,0,3,0, +0,0,43,0,4,0,1,0,0,0,14,0,0,0,61,10,135,63,43,0,4,0,1,0,0,0,15,0,0,0, +0,0,128,63,43,0,4,0,1,0,0,0,16,0,0,0,154,153,25,64,43,0,4,0,1,0,0,0,17,0, +0,0,174,71,97,61,43,0,4,0,1,0,0,0,18,0,0,0,82,184,78,65,19,0,2,0,19,0,0,0, +33,0,3,0,20,0,0,0,19,0,0,0,23,0,4,0,21,0,0,0,1,0,0,0,2,0,0,0,32,0, +4,0,22,0,0,0,1,0,0,0,21,0,0,0,32,0,4,0,24,0,0,0,7,0,0,0,21,0,0,0, +30,0,3,0,25,0,0,0,21,0,0,0,32,0,4,0,26,0,0,0,7,0,0,0,25,0,0,0,23,0, +4,0,27,0,0,0,1,0,0,0,4,0,0,0,32,0,4,0,28,0,0,0,3,0,0,0,27,0,0,0, +30,0,3,0,30,0,0,0,27,0,0,0,32,0,4,0,31,0,0,0,7,0,0,0,30,0,0,0,32,0, +4,0,59,0,0,0,7,0,0,0,27,0,0,0,59,0,4,0,4,0,0,0,5,0,0,0,0,0,0,0, +59,0,4,0,22,0,0,0,23,0,0,0,1,0,0,0,59,0,4,0,28,0,0,0,29,0,0,0,3,0, +0,0,54,0,5,0,6,0,0,0,33,0,0,0,0,0,0,0,8,0,0,0,55,0,3,0,7,0,0,0, +35,0,0,0,248,0,2,0,36,0,0,0,61,0,4,0,6,0,0,0,37,0,0,0,35,0,0,0,80,0, +6,0,6,0,0,0,38,0,0,0,9,0,0,0,9,0,0,0,9,0,0,0,186,0,5,0,13,0,0,0, +39,0,0,0,37,0,0,0,38,0,0,0,61,0,4,0,6,0,0,0,40,0,0,0,35,0,0,0,136,0, +5,0,1,0,0,0,41,0,0,0,15,0,0,0,16,0,0,0,80,0,6,0,6,0,0,0,42,0,0,0, +41,0,0,0,41,0,0,0,41,0,0,0,12,0,7,0,6,0,0,0,43,0,0,0,32,0,0,0,26,0, +0,0,40,0,0,0,42,0,0,0,142,0,5,0,6,0,0,0,44,0,0,0,43,0,0,0,14,0,0,0, +80,0,6,0,6,0,0,0,45,0,0,0,17,0,0,0,17,0,0,0,17,0,0,0,131,0,5,0,6,0, +0,0,46,0,0,0,44,0,0,0,45,0,0,0,61,0,4,0,6,0,0,0,47,0,0,0,35,0,0,0, +142,0,5,0,6,0,0,0,48,0,0,0,47,0,0,0,18,0,0,0,169,0,6,0,6,0,0,0,49,0, +0,0,39,0,0,0,46,0,0,0,48,0,0,0,254,0,2,0,49,0,0,0,56,0,1,0,54,0,5,0, +19,0,0,0,34,0,0,0,0,0,0,0,20,0,0,0,248,0,2,0,50,0,0,0,59,0,4,0,31,0, +0,0,51,0,0,0,7,0,0,0,59,0,4,0,26,0,0,0,52,0,0,0,7,0,0,0,65,0,5,0, +24,0,0,0,53,0,0,0,52,0,0,0,11,0,0,0,63,0,3,0,53,0,0,0,23,0,0,0,61,0, +4,0,3,0,0,0,54,0,0,0,5,0,0,0,65,0,5,0,24,0,0,0,55,0,0,0,52,0,0,0, +11,0,0,0,61,0,4,0,21,0,0,0,56,0,0,0,55,0,0,0,87,0,5,0,27,0,0,0,57,0, +0,0,54,0,0,0,56,0,0,0,65,0,5,0,59,0,0,0,58,0,0,0,51,0,0,0,11,0,0,0, +62,0,3,0,58,0,0,0,57,0,0,0,61,0,4,0,30,0,0,0,60,0,0,0,51,0,0,0,81,0, +5,0,27,0,0,0,61,0,0,0,60,0,0,0,0,0,0,0,62,0,3,0,29,0,0,0,61,0,0,0, +253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Embedded/ScreenVertex.nzsl 
b/runtime/Includes/Embedded/ScreenVertex.nzsl new file mode 100644 index 0000000..94a4440 --- /dev/null +++ b/runtime/Includes/Embedded/ScreenVertex.nzsl @@ -0,0 +1,31 @@ +[nzsl_version("1.0")] +module; + +struct VertIn +{ + [builtin(vertex_index)] vert_index: i32 +} + +struct VertOut +{ + [location(0)] uv: vec2[f32], + [builtin(position)] position: vec4[f32] +} + +const vertices = array[vec2[f32]]( + vec2[f32](-1.0, -3.0), + vec2[f32](-1.0, 1.0), + vec2[f32]( 3.0, 1.0) +); + +[entry(vert)] +fn main(input: VertIn) -> VertOut +{ + let position = vertices[input.vert_index]; + + let output: VertOut; + output.position = vec4[f32](position, 0.0, 1.0); + output.uv = position * 0.5 + vec2[f32](0.5, 0.5); + + return output; +} diff --git a/runtime/Includes/Embedded/ScreenVertex.spv.h b/runtime/Includes/Embedded/ScreenVertex.spv.h new file mode 100644 index 0000000..3ba7900 --- /dev/null +++ b/runtime/Includes/Embedded/ScreenVertex.spv.h @@ -0,0 +1,48 @@ +3,2,35,7,0,0,1,0,39,0,0,0,59,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,8,0,0,0,0,0,36,0,0,0,109,97,105,110,0,0,0,0, +20,0,0,0,26,0,0,0,29,0,0,0,3,0,3,0,0,0,0,0,100,0,0,0,5,0,4,0,23,0, +0,0,86,101,114,116,73,110,0,0,6,0,6,0,23,0,0,0,0,0,0,0,118,101,114,116,95,105,110,100, +101,120,0,0,5,0,4,0,30,0,0,0,86,101,114,116,79,117,116,0,6,0,4,0,30,0,0,0,0,0, +0,0,117,118,0,0,6,0,6,0,30,0,0,0,1,0,0,0,112,111,115,105,116,105,111,110,0,0,0,0, +5,0,5,0,15,0,0,0,118,101,114,116,105,99,101,115,0,0,0,0,5,0,6,0,20,0,0,0,118,101, +114,116,101,120,95,105,110,100,101,120,0,0,0,0,5,0,3,0,26,0,0,0,117,118,0,0,5,0,5,0, +29,0,0,0,112,111,115,105,116,105,111,110,0,0,0,0,5,0,4,0,36,0,0,0,109,97,105,110,0,0, +0,0,71,0,4,0,20,0,0,0,11,0,0,0,42,0,0,0,71,0,4,0,29,0,0,0,11,0,0,0, +0,0,0,0,71,0,4,0,26,0,0,0,30,0,0,0,0,0,0,0,72,0,5,0,23,0,0,0,0,0, +0,0,35,0,0,0,0,0,0,0,72,0,5,0,30,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0, +72,0,5,0,30,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,22,0,3,0,1,0,0,0,32,0, +0,0,23,0,4,0,2,0,0,0,1,0,0,0,2,0,0,0,21,0,4,0,3,0,0,0,32,0,0,0, +0,0,0,0,43,0,4,0,3,0,0,0,4,0,0,0,3,0,0,0,28,0,4,0,5,0,0,0,2,0, +0,0,4,0,0,0,32,0,4,0,6,0,0,0,6,0,0,0,5,0,0,0,43,0,4,0,1,0,0,0, +7,0,0,0,0,0,128,191,43,0,4,0,1,0,0,0,8,0,0,0,0,0,64,192,44,0,5,0,2,0, +0,0,9,0,0,0,7,0,0,0,8,0,0,0,43,0,4,0,1,0,0,0,10,0,0,0,0,0,128,63, +44,0,5,0,2,0,0,0,11,0,0,0,7,0,0,0,10,0,0,0,43,0,4,0,1,0,0,0,12,0, +0,0,0,0,64,64,44,0,5,0,2,0,0,0,13,0,0,0,12,0,0,0,10,0,0,0,44,0,6,0, +5,0,0,0,14,0,0,0,9,0,0,0,11,0,0,0,13,0,0,0,19,0,2,0,16,0,0,0,33,0, +3,0,17,0,0,0,16,0,0,0,21,0,4,0,18,0,0,0,32,0,0,0,1,0,0,0,32,0,4,0, +19,0,0,0,1,0,0,0,18,0,0,0,43,0,4,0,18,0,0,0,21,0,0,0,0,0,0,0,32,0, +4,0,22,0,0,0,7,0,0,0,18,0,0,0,30,0,3,0,23,0,0,0,18,0,0,0,32,0,4,0, +24,0,0,0,7,0,0,0,23,0,0,0,32,0,4,0,25,0,0,0,3,0,0,0,2,0,0,0,23,0, +4,0,27,0,0,0,1,0,0,0,4,0,0,0,32,0,4,0,28,0,0,0,3,0,0,0,27,0,0,0, +30,0,4,0,30,0,0,0,2,0,0,0,27,0,0,0,32,0,4,0,31,0,0,0,7,0,0,0,2,0, +0,0,32,0,4,0,32,0,0,0,7,0,0,0,30,0,0,0,43,0,4,0,18,0,0,0,33,0,0,0, +1,0,0,0,43,0,4,0,1,0,0,0,34,0,0,0,0,0,0,0,43,0,4,0,1,0,0,0,35,0, +0,0,0,0,0,63,32,0,4,0,44,0,0,0,6,0,0,0,2,0,0,0,32,0,4,0,50,0,0,0, +7,0,0,0,27,0,0,0,59,0,5,0,6,0,0,0,15,0,0,0,6,0,0,0,14,0,0,0,59,0, +4,0,19,0,0,0,20,0,0,0,1,0,0,0,59,0,4,0,25,0,0,0,26,0,0,0,3,0,0,0, +59,0,4,0,28,0,0,0,29,0,0,0,3,0,0,0,54,0,5,0,16,0,0,0,36,0,0,0,0,0, +0,0,17,0,0,0,248,0,2,0,37,0,0,0,59,0,4,0,31,0,0,0,38,0,0,0,7,0,0,0, +59,0,4,0,32,0,0,0,39,0,0,0,7,0,0,0,59,0,4,0,24,0,0,0,40,0,0,0,7,0, +0,0,65,0,5,0,22,0,0,0,41,0,0,0,40,0,0,0,21,0,0,0,63,0,3,0,41,0,0,0, 
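
The three clip-space vertices above form the classic single fullscreen triangle: they enclose the whole [-1,1]^2 square, and uv = pos * 0.5 + 0.5 maps that square exactly onto [0,1]^2, so the pass needs no quad and no vertex buffer. A quick check of the values:

#include <cstdio>

int main()
{
	// The triangle's own corners map to uv (0,-1), (0,1) and (2,1); the
	// visible square still interpolates to exactly [0,1]^2 inside it.
	const float verts[3][2] = { { -1.0f, -3.0f }, { -1.0f, 1.0f }, { 3.0f, 1.0f } };
	for(const auto& v : verts)
		std::printf("pos(%g, %g) -> uv(%g, %g)\n", v[0], v[1], v[0] * 0.5f + 0.5f, v[1] * 0.5f + 0.5f);
	return 0;
}
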
+20,0,0,0,65,0,5,0,22,0,0,0,42,0,0,0,40,0,0,0,21,0,0,0,61,0,4,0,18,0,
+0,0,43,0,0,0,42,0,0,0,65,0,5,0,44,0,0,0,45,0,0,0,15,0,0,0,43,0,0,0,
+61,0,4,0,2,0,0,0,46,0,0,0,45,0,0,0,62,0,3,0,38,0,0,0,46,0,0,0,61,0,
+4,0,2,0,0,0,47,0,0,0,38,0,0,0,80,0,6,0,27,0,0,0,48,0,0,0,47,0,0,0,
+34,0,0,0,10,0,0,0,65,0,5,0,50,0,0,0,49,0,0,0,39,0,0,0,33,0,0,0,62,0,
+3,0,49,0,0,0,48,0,0,0,61,0,4,0,2,0,0,0,51,0,0,0,38,0,0,0,142,0,5,0,
+2,0,0,0,52,0,0,0,51,0,0,0,35,0,0,0,80,0,5,0,2,0,0,0,53,0,0,0,35,0,
+0,0,35,0,0,0,129,0,5,0,2,0,0,0,54,0,0,0,52,0,0,0,53,0,0,0,65,0,5,0,
+31,0,0,0,55,0,0,0,39,0,0,0,21,0,0,0,62,0,3,0,55,0,0,0,54,0,0,0,61,0,
+4,0,30,0,0,0,56,0,0,0,39,0,0,0,81,0,5,0,2,0,0,0,57,0,0,0,56,0,0,0,
+0,0,0,0,62,0,3,0,26,0,0,0,57,0,0,0,81,0,5,0,27,0,0,0,58,0,0,0,56,0,
+0,0,1,0,0,0,62,0,3,0,29,0,0,0,58,0,0,0,253,0,1,0,56,0,1,0
diff --git a/runtime/Includes/Graphics/Mesh.h b/runtime/Includes/Graphics/Mesh.h
new file mode 100644
index 0000000..999213f
--- /dev/null
+++ b/runtime/Includes/Graphics/Mesh.h
@@ -0,0 +1,53 @@
+#ifndef __MLX_RENDERER_MESH__
+#define __MLX_RENDERER_MESH__
+
+#include
+#include
+#include
+
+namespace mlx
+{
+	class Mesh
+	{
+		public:
+			struct SubMesh
+			{
+				VertexBuffer vbo;
+				IndexBuffer ibo;
+				std::size_t triangle_count = 0;
+
+				inline SubMesh(const std::vector<Vertex>& vertices, const std::vector<std::uint32_t>& indices)
+				{
+					CPUBuffer vb(vertices.size() * sizeof(Vertex));
+					std::memcpy(vb.GetData(), vertices.data(), vb.GetSize());
+					vbo.Init(vb.GetSize());
+					vbo.SetData(std::move(vb));
+
+					CPUBuffer ib(indices.size() * sizeof(std::uint32_t));
+					std::memcpy(ib.GetData(), indices.data(), ib.GetSize());
+					ibo.Init(ib.GetSize());
+					ibo.SetData(std::move(ib));
+
+					triangle_count = vertices.size() / 3;
+				}
+			};
+
+		public:
+			Mesh() = default;
+
+			void Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn) const noexcept;
+			void Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn, std::size_t submesh_index) const noexcept;
+
+			inline std::size_t GetSubMeshCount() const { return m_sub_meshes.size(); }
+
+			inline void AddSubMesh(SubMesh mesh) { m_sub_meshes.emplace_back(std::move(mesh)); }
+			[[nodiscard]] inline SubMesh& GetSubMesh(std::size_t index) { return m_sub_meshes.at(index); }
+
+			~Mesh();
+
+		private:
+			std::vector<SubMesh> m_sub_meshes;
+	};
+}
+
+#endif
diff --git a/runtime/Includes/Graphics/Scene.h b/runtime/Includes/Graphics/Scene.h
new file mode 100644
index 0000000..32f06bf
--- /dev/null
+++ b/runtime/Includes/Graphics/Scene.h
@@ -0,0 +1,32 @@
+#ifndef __MLX_SCENE__
+#define __MLX_SCENE__
+
+#include
+
+namespace mlx
+{
+	struct SceneDescriptor
+	{
+		NonOwningPtr<Renderer> renderer;
+		// More description may come in future
+	};
+
+	class Scene
+	{
+		public:
+			Scene(SceneDescriptor desc);
+
+			Sprite& CreateSprite(std::shared_ptr<Texture> texture) noexcept;
+
+			[[nodiscard]] inline const std::vector<std::shared_ptr<Sprite>>& GetSprites() const noexcept { return m_sprites; }
+			[[nodiscard]] inline const SceneDescriptor& GetDescription() const noexcept { return m_descriptor; }
+
+			~Scene() = default;
+
+		private:
+			SceneDescriptor m_descriptor;
+			std::vector<std::shared_ptr<Sprite>> m_sprites;
+	};
+}
+
+#endif
diff --git a/runtime/Includes/Graphics/Sprite.h b/runtime/Includes/Graphics/Sprite.h
new file mode 100644
index 0000000..1720f6a
--- /dev/null
+++ b/runtime/Includes/Graphics/Sprite.h
@@ -0,0 +1,53 @@
+#ifndef __MLX_SPRITE__
+#define __MLX_SPRITE__
+
+#include
+#include
+#include
+#include
+#include
+
+namespace mlx
+{
+	class Sprite
+	{
+		friend class Render2DPass;
+
+		public:
+			Sprite(std::shared_ptr<Texture> texture);
+
+			inline 
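
A minimal sketch of feeding the new Mesh::SubMesh constructor with an indexed quad. MakeQuad is an illustrative helper, not part of the patch, and the mlx::Vertex field list is not spelled out in this hunk, so the vertex initializer is left as a comment rather than guessed:

#include <cstdint>
#include <memory>
#include <vector>

std::shared_ptr<mlx::Mesh> MakeQuad(float w, float h)
{
	std::vector<mlx::Vertex> vertices;
	// fill the four corners (0,0), (w,0), (w,h), (0,h) with matching uvs here
	std::vector<std::uint32_t> indices = { 0, 1, 2, 2, 3, 0 }; // two triangles
	auto mesh = std::make_shared<mlx::Mesh>();
	mesh->AddSubMesh(mlx::Mesh::SubMesh(vertices, indices)); // buffers are moved in
	return mesh;
}
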
void SetColor(Vec4f color) noexcept { m_color = color; } + inline void SetPosition(Vec2ui position) noexcept { m_position = position; } + + [[nodiscard]] inline const Vec4f& GetColor() const noexcept { return m_color; } + [[nodiscard]] inline const Vec2ui& GetPosition() const noexcept { return m_position; } + [[nodiscard]] inline std::shared_ptr GetMesh() const { return p_mesh; } + [[nodiscard]] inline std::shared_ptr GetTexture() const { return p_texture; } + + ~Sprite() = default; + + private: + [[nodiscard]] inline bool IsSetInit() const noexcept { return m_set.IsInit(); } + [[nodiscard]] inline VkDescriptorSet GetSet(std::size_t frame_index) const noexcept { return m_set.GetSet(frame_index); } + + inline void UpdateDescriptorSet(const DescriptorSet& set) + { + m_set = set.Duplicate(); + } + + inline void Bind(std::size_t frame_index, VkCommandBuffer cmd) + { + m_set.SetImage(frame_index, 0, *p_texture); + m_set.Update(frame_index, cmd); + } + + private: + DescriptorSet m_set; + std::shared_ptr p_texture; + std::shared_ptr p_mesh; + Vec4f m_color = Vec4f{ 1.0f, 1.0f, 1.0f, 1.0f }; + Vec2ui m_position = Vec2ui{ 0, 0 }; + }; +} + +#endif diff --git a/runtime/Includes/Maths/Angles.h b/runtime/Includes/Maths/Angles.h new file mode 100644 index 0000000..801d71e --- /dev/null +++ b/runtime/Includes/Maths/Angles.h @@ -0,0 +1,108 @@ +#ifndef __SCOP_ANGLES__ +#define __SCOP_ANGLES__ + +#include +#include +#include + +namespace Scop +{ + template struct EulerAngles; + template struct Quat; + + template + struct Angle + { + T value; + + constexpr Angle() = default; + constexpr Angle(T angle); + template constexpr explicit Angle(const Angle& Angle); + template constexpr Angle(const Angle& angle); + constexpr Angle(const Angle&) = default; + constexpr Angle(Angle&&) noexcept = default; + ~Angle() = default; + + constexpr bool ApproxEqual(const Angle& angle) const; + constexpr bool ApproxEqual(const Angle& angle, T max_difference) const; + + T GetCos() const; + T GetSin() const; + std::pair GetSinCos() const; + T GetTan() const; + + constexpr Angle& Normalize(); + + template T To() const; + template Angle ToAngle() const; + constexpr T ToDegrees() const; + constexpr Angle ToDegreeAngle() const; + EulerAngles ToEulerAngles() const; + Quat ToQuat() const; + constexpr T ToRadians() const; + constexpr Angle ToRadianAngle() const; + std::string ToString() const; + constexpr T ToTurns() const; + constexpr Angle ToTurnAngle() const; + + constexpr Angle& operator=(const Angle&) = default; + constexpr Angle& operator=(Angle&&) noexcept = default; + + constexpr Angle operator+() const; + constexpr Angle operator-() const; + + constexpr Angle operator+(Angle other) const; + constexpr Angle operator-(Angle other) const; + constexpr Angle operator*(T scalar) const; + constexpr Angle operator/(T divider) const; + + constexpr Angle& operator+=(Angle other); + constexpr Angle& operator-=(Angle other); + constexpr Angle& operator*=(T scalar); + constexpr Angle& operator/=(T divider); + + constexpr bool operator==(Angle other) const; + constexpr bool operator!=(Angle other) const; + constexpr bool operator<(Angle other) const; + constexpr bool operator<=(Angle other) const; + constexpr bool operator>(Angle other) const; + constexpr bool operator>=(Angle other) const; + + static constexpr bool ApproxEqual(const Angle& lhs, const Angle& rhs); + static constexpr bool ApproxEqual(const Angle& lhs, const Angle& rhs, T max_difference); + static constexpr Angle Clamp(Angle angle, Angle min, Angle max); + template static 
constexpr Angle From(T value); + static constexpr Angle FromDegrees(T degrees); + static constexpr Angle FromRadians(T radians); + static constexpr Angle FromTurns(T turn); + static constexpr Angle Zero(); + }; + + template + using DegreeAngle = Angle; + + using DegreeAngled = DegreeAngle; + using DegreeAnglef = DegreeAngle; + + template + using RadianAngle = Angle; + + using RadianAngled = RadianAngle; + using RadianAnglef = RadianAngle; + + template + using TurnAngle = Angle; + + using TurnAngled = TurnAngle; + using TurnAnglef = TurnAngle; + + template Angle operator*(T scale, Angle angle); + + template Angle operator/(T divider, Angle angle); + + template std::ostream& operator<<(std::ostream& out, Angle angle); +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Angles.inl b/runtime/Includes/Maths/Angles.inl new file mode 100644 index 0000000..5c2631e --- /dev/null +++ b/runtime/Includes/Maths/Angles.inl @@ -0,0 +1,488 @@ +#pragma once +#include + +#include +#include + +#include +#include + +namespace Scop +{ + namespace Internal + { + template struct AngleConversion; + + template + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle; + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return DegreeToRadian(angle); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle / T(360); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return RadianToDegree(angle); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle / Tau(); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle * T(360); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle * Tau(); + } + }; + + template struct AngleUtils; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-4); + } + + template + static constexpr T GetLimit() + { + return 360; + } + + template static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "deg)"; + } + }; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-5); + } + + template + static constexpr T GetLimit() + { + return Tau(); + } + + template + static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "rad)"; + } + }; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-5); + } + + template + static constexpr T GetLimit() + { + return 1; + } + + template + static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "turn)"; + } + }; + + template + void SinCos(T x, T* sin, T* cos) + { + double s, c; + ::sincos(x, &s, &c); + + *sin = static_cast(s); + *cos = static_cast(c); + } + + template<> + inline void SinCos(float x, float* s, float* c) + { + ::sincosf(x, s, c); + } + + template<> + inline void SinCos(long double x, long double* s, long double* c) + { + ::sincosl(x, s, c); + } + } + + template + constexpr Angle::Angle(T angle) : + value(angle) + { + } + + template + template + constexpr Angle::Angle(const Angle& angle) : + value(static_cast(angle.value)) + { + } + + template + template + constexpr 
Angle::Angle(const Angle& angle) : + value(Internal::AngleConversion::Convert(angle.value)) + { + } + + template + constexpr bool Angle::ApproxEqual(const Angle& angle) const + { + return ApproxEqual(angle, Internal::AngleUtils::template GetEpsilon()); + } + + template + constexpr bool Angle::ApproxEqual(const Angle& angle, T maxDifference) const + { + return NumberEquals(value, angle.value, maxDifference); + } + + template + T Angle::GetCos() const + { + return std::cos(ToRadians()); + } + + template + T Angle::GetSin() const + { + return std::sin(ToRadians()); + } + + template + std::pair Angle::GetSinCos() const + { + T sin, cos; + Internal::SinCos(ToRadians(), &sin, &cos); + + return std::make_pair(sin, cos); + } + + template + T Angle::GetTan() const + { + return std::tan(ToRadians()); + } + + template + constexpr Angle& Angle::Normalize() + { + constexpr T limit = Internal::AngleUtils::template GetLimit(); + constexpr T halfLimit = limit / T(2); + + value = Mod(value + halfLimit, limit); + if (value < T(0)) + value += limit; + + value -= halfLimit; + return *this; + } + + template + template + T Angle::To() const + { + return Internal::AngleConversion::Convert(value); + } + + template + template + Angle Angle::ToAngle() const + { + return Angle(To()); + } + + template + constexpr T Angle::ToDegrees() const + { + return To(); + } + + template + constexpr Angle Angle::ToDegreeAngle() const + { + return ToAngle(); + } + + template + EulerAngles Angle::ToEulerAngles() const + { + return EulerAngles(0, 0, ToDegrees()); + } + + template + Quat Angle::ToQuat() const + { + auto halfAngle = Angle(*this) / 2.f; + auto sincos = halfAngle.GetSinCos(); + return Quat(sincos.second, 0, 0, sincos.first); + } + + template + constexpr T Angle::ToRadians() const + { + return To(); + } + + template + constexpr Angle Angle::ToRadianAngle() const + { + return ToAngle(); + } + + template + std::string Angle::ToString() const + { + std::ostringstream oss; + Internal::AngleUtils::ToString(oss, value); + + return oss.str(); + } + + template + constexpr T Angle::ToTurns() const + { + return To(value); + } + + template + constexpr Angle Angle::ToTurnAngle() const + { + return ToAngle(); + } + + template + constexpr Angle Angle::operator+() const + { + return *this; + } + + template + constexpr Angle Angle::operator-() const + { + return Angle(-value); + } + + template + constexpr Angle Angle::operator+(Angle other) const + { + return Angle(value + other.value); + } + + template + constexpr Angle Angle::operator-(Angle other) const + { + return Angle(value - other.value); + } + + template + constexpr Angle Angle::operator*(T scalar) const + { + return Angle(value * scalar); + } + + template + constexpr Angle Angle::operator/(T divider) const + { + return Angle(value / divider); + } + + template + constexpr Angle& Angle::operator+=(Angle other) + { + value += other.value; + return *this; + } + + template + constexpr Angle& Angle::operator-=(Angle other) + { + value -= other.value; + return *this; + } + + template + constexpr Angle& Angle::operator*=(T scalar) + { + value *= scalar; + return *this; + } + + template + constexpr Angle& Angle::operator/=(T divider) + { + value /= divider; + return *this; + } + + template + constexpr bool Angle::operator==(Angle other) const + { + return value == other.value; + } + + template + constexpr bool Angle::operator!=(Angle other) const + { + return value != other.value; + } + + template + constexpr bool Angle::operator<(Angle other) const + { + return value < other.value; + 
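
A short usage sketch of the Angle template above (the template parameter lists are partly stripped in this copy of the patch, but the member definitions make the Degree/Radian/Turn tag dispatch clear):

#include <cstdio>

int main()
{
	Scop::DegreeAnglef a = Scop::DegreeAnglef::FromDegrees(450.0f);
	a.Normalize(); // wraps into [-180, 180): 450 deg becomes 90 deg
	auto [s, c] = a.GetSinCos(); // computed via the radian value internally
	std::printf("%s = %f rad (sin %f, cos %f)\n", a.ToString().c_str(), a.ToRadians(), s, c);
	return 0;
}
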
} + + template + constexpr bool Angle::operator<=(Angle other) const + { + return value <= other.value; + } + + template + constexpr bool Angle::operator>(Angle other) const + { + return value > other.value; + } + + template + constexpr bool Angle::operator>=(Angle other) const + { + return value >= other.value; + } + + template + constexpr bool Angle::ApproxEqual(const Angle& lhs, const Angle& rhs) + { + return lhs.ApproxEqual(rhs); + } + + template + constexpr bool Angle::ApproxEqual(const Angle& lhs, const Angle& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Angle Angle::Clamp(Angle angle, Angle min, Angle max) + { + return Angle(std::clamp(angle.value, min.value, max.value)); + } + + template + template + constexpr Angle Angle::From(T value) + { + return Angle(Internal::AngleConversion::Convert(value)); + } + + template + constexpr Angle Angle::FromDegrees(T degrees) + { + return From(degrees); + } + + template + constexpr Angle Angle::FromRadians(T radians) + { + return From(radians); + } + + template + constexpr Angle Angle::FromTurns(T turns) + { + return From(turns); + } + + template + constexpr Angle Angle::Zero() + { + return Angle(0); + } + + template + Angle operator/(T scale, Angle angle) + { + return Angle(scale / angle.value); + } + + template + std::ostream& operator<<(std::ostream& out, Angle angle) + { + return Internal::AngleUtils::ToString(out, angle.value); + } + + template + constexpr Angle Clamp(Angle value, T min, T max) + { + return std::max(std::min(value.value, max), min); + } +} diff --git a/runtime/Includes/Maths/Constants.h b/runtime/Includes/Maths/Constants.h new file mode 100644 index 0000000..015346f --- /dev/null +++ b/runtime/Includes/Maths/Constants.h @@ -0,0 +1,87 @@ +#ifndef __SCOP_MATHS_CONSTANTS__ +#define __SCOP_MATHS_CONSTANTS__ + +#include +#include +#include + +namespace Scop +{ + template constexpr std::size_t BitCount = CHAR_BIT * sizeof(T); + + template + struct MathConstants + { + static constexpr T Infinity() + { + static_assert(std::numeric_limits::has_infinity); + return std::numeric_limits::infinity(); + } + + static constexpr T Max() + { + return std::numeric_limits::max(); + } + + static constexpr T Min() + { + return std::numeric_limits::min(); + } + + static constexpr T NaN() + { + static_assert(std::numeric_limits::has_signaling_NaN); + return std::numeric_limits::quiet_NaN(); + } + + // Math constants + static constexpr T HalfPi() + { + static_assert(std::is_floating_point_v); + return T(1.5707963267948966192313216916398); + } + + static constexpr T Pi() + { + static_assert(std::is_floating_point_v); + return T(3.1415926535897932384626433832795); + } + + static constexpr T Sqrt2() + { + static_assert(std::is_floating_point_v); + return T(1.4142135623730950488016887242097); + } + + static constexpr T Sqrt3() + { + static_assert(std::is_floating_point_v); + return T(1.7320508075688772935274463415059); + } + + static constexpr T Sqrt5() + { + static_assert(std::is_floating_point_v); + return T(2.2360679774997896964091736687313); + } + + static constexpr T Tau() + { + static_assert(std::is_floating_point_v); + return T(6.2831853071795864769252867665590); + } + }; + + template constexpr auto Infinity() { return MathConstants::Infinity(); } + template constexpr auto MaxValue() { return MathConstants::Max(); } + template constexpr auto MinValue() { return MathConstants::Min(); } + template constexpr auto NaN() { return MathConstants::NaN(); } + template constexpr auto HalfPi() { return 
MathConstants::HalfPi(); } + template constexpr auto Pi() { return MathConstants::Pi(); } + template constexpr auto Sqrt2() { return MathConstants::Sqrt2(); } + template constexpr auto Sqrt3() { return MathConstants::Sqrt3(); } + template constexpr auto Sqrt5() { return MathConstants::Sqrt5(); } + template constexpr auto Tau() { return MathConstants::Tau(); } +} + +#endif diff --git a/runtime/Includes/Maths/Enums.h b/runtime/Includes/Maths/Enums.h new file mode 100644 index 0000000..df12fbd --- /dev/null +++ b/runtime/Includes/Maths/Enums.h @@ -0,0 +1,20 @@ +#ifndef __SCOPE_MATHS_ENUMS__ +#define __SCOPE_MATHS_ENUMS__ + +#include + +namespace Scop +{ + enum class AngleUnit + { + Degree = 0, + Radian, + Turn, + + EndEnum + }; + + constexpr std::size_t AngleUnitCount = static_cast(AngleUnit::EndEnum); +} + +#endif diff --git a/runtime/Includes/Maths/EulerAngles.h b/runtime/Includes/Maths/EulerAngles.h new file mode 100644 index 0000000..8a873cb --- /dev/null +++ b/runtime/Includes/Maths/EulerAngles.h @@ -0,0 +1,57 @@ +#ifndef __SCOP_EULER_ANGLES__ +#define __SCOP_EULER_ANGLES__ + +#include + +#include + +namespace Scop +{ + template + struct EulerAngles + { + constexpr EulerAngles() = default; + constexpr EulerAngles(DegreeAngle P, DegreeAngle Y, DegreeAngle R); + constexpr EulerAngles(const DegreeAngle angles[3]); + template constexpr EulerAngles(const Angle& angle); + constexpr EulerAngles(const Quat& quat); + template constexpr explicit EulerAngles(const EulerAngles& angles); + constexpr EulerAngles(const EulerAngles&) = default; + constexpr EulerAngles(EulerAngles&&) = default; + ~EulerAngles() = default; + + constexpr bool ApproxEqual(const EulerAngles& angles, T maxDifference = std::numeric_limits::epsilon()) const; + + constexpr EulerAngles& Normalize(); + + Quat ToQuat() const; + std::string ToString() const; + + constexpr EulerAngles operator+(const EulerAngles& angles) const; + constexpr EulerAngles operator-(const EulerAngles& angles) const; + + constexpr EulerAngles& operator=(const EulerAngles&) = default; + constexpr EulerAngles& operator=(EulerAngles&&) = default; + + constexpr EulerAngles& operator+=(const EulerAngles& angles); + constexpr EulerAngles& operator-=(const EulerAngles& angles); + + constexpr bool operator==(const EulerAngles& angles) const; + constexpr bool operator!=(const EulerAngles& angles) const; + constexpr bool operator<(const EulerAngles& angles) const; + constexpr bool operator<=(const EulerAngles& angles) const; + constexpr bool operator>(const EulerAngles& angles) const; + constexpr bool operator>=(const EulerAngles& angles) const; + + static constexpr bool ApproxEqual(const EulerAngles& lhs, const EulerAngles& rhs, T maxDifference = std::numeric_limits::epsilon()); + static constexpr EulerAngles Zero(); + + DegreeAngle pitch, yaw, roll; + }; + + using EulerAnglesf = EulerAngles; +} + +#include + +#endif diff --git a/runtime/Includes/Maths/EulerAngles.inl b/runtime/Includes/Maths/EulerAngles.inl new file mode 100644 index 0000000..97fab12 --- /dev/null +++ b/runtime/Includes/Maths/EulerAngles.inl @@ -0,0 +1,169 @@ +#pragma once +#include + +namespace Scop +{ + template + constexpr EulerAngles::EulerAngles(DegreeAngle P, DegreeAngle Y, DegreeAngle R) : + pitch(P), yaw(Y), roll(R) + {} + + template + constexpr EulerAngles::EulerAngles(const DegreeAngle angles[3]) : + EulerAngles(angles[0], angles[1], angles[2]) + {} + + template + template + constexpr EulerAngles::EulerAngles(const Angle& angle) : + EulerAngles(angle.ToEulerAngles()) + {} + + template + 
constexpr EulerAngles::EulerAngles(const Quat& quat) : + EulerAngles(quat.ToEulerAngles()) + {} + + template + template + constexpr EulerAngles::EulerAngles(const EulerAngles& angles) : + pitch(DegreeAngle(angles.pitch)), yaw(DegreeAngle(angles.yaw)), roll(DegreeAngle(angles.roll)) + {} + + template + constexpr bool EulerAngles::ApproxEqual(const EulerAngles& angles, T maxDifference) const + { + return pitch.ApproxEqual(angles.pitch, maxDifference) && yaw.ApproxEqual(angles.yaw, maxDifference) && roll.ApproxEqual(angles.roll, maxDifference); + } + + template + constexpr EulerAngles& EulerAngles::Normalize() + { + pitch.Normalize(); + yaw.Normalize(); + roll.Normalize(); + return *this; + } + + template + Quat EulerAngles::ToQuat() const + { + // XYZ + auto [s1, c1] = (yaw / T(2.0)).GetSinCos(); + auto [s2, c2] = (roll / T(2.0)).GetSinCos(); + auto [s3, c3] = (pitch / T(2.0)).GetSinCos(); + + return Quat(c1 * c2 * c3 - s1 * s2 * s3, + s1 * s2 * c3 + c1 * c2 * s3, + s1 * c2 * c3 + c1 * s2 * s3, + c1 * s2 * c3 - s1 * c2 * s3); + } + + template + std::string EulerAngles::ToString() const + { + std::ostringstream ss; + ss << *this; + return ss.str(); + } + + template + constexpr EulerAngles EulerAngles::operator+(const EulerAngles& angles) const + { + return EulerAngles(pitch + angles.pitch, yaw + angles.yaw, roll + angles.roll); + } + + template + constexpr EulerAngles EulerAngles::operator-(const EulerAngles& angles) const + { + return EulerAngles(pitch - angles.pitch, yaw - angles.yaw, roll - angles.roll); + } + + template + constexpr EulerAngles& EulerAngles::operator+=(const EulerAngles& angles) + { + pitch += angles.pitch; + yaw += angles.yaw; + roll += angles.roll; + return *this; + } + + template + constexpr EulerAngles& EulerAngles::operator-=(const EulerAngles& angles) + { + pitch -= angles.pitch; + yaw -= angles.yaw; + roll -= angles.roll; + return *this; + } + + template + constexpr bool EulerAngles::operator==(const EulerAngles& angles) const + { + return pitch == angles.pitch && yaw == angles.yaw && roll == angles.roll; + } + + template + constexpr bool EulerAngles::operator!=(const EulerAngles& angles) const + { + return !operator==(angles); + } + + template + constexpr bool EulerAngles::operator<(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch < angles.pitch; + if (yaw != angles.yaw) + return yaw < angles.yaw; + return roll < angles.roll; + } + + template + constexpr bool EulerAngles::operator<=(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch < angles.pitch; + if (yaw != angles.yaw) + return yaw < angles.yaw; + return roll <= angles.roll; + } + + template + constexpr bool EulerAngles::operator>(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch > angles.pitch; + if (yaw != angles.yaw) + return yaw > angles.yaw; + return roll > angles.roll; + } + + template + constexpr bool EulerAngles::operator>=(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch > angles.pitch; + if (yaw != angles.yaw) + return yaw > angles.yaw; + return roll >= angles.roll; + } + + template + constexpr bool EulerAngles::ApproxEqual(const EulerAngles& lhs, const EulerAngles& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr EulerAngles EulerAngles::Zero() + { + return EulerAngles(0, 0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const EulerAngles& angles) + { + return out << "EulerAngles(" << angles.pitch << ", " 
<< angles.yaw << ", " << angles.roll << ')'; + } +} diff --git a/runtime/Includes/Maths/Mat4.h b/runtime/Includes/Maths/Mat4.h new file mode 100644 index 0000000..4151ba9 --- /dev/null +++ b/runtime/Includes/Maths/Mat4.h @@ -0,0 +1,122 @@ +#ifndef __SCOP_MAT4__ +#define __SCOP_MAT4__ + +#include +#include +#include + +#include + +namespace Scop +{ + template struct Vec2; + template struct Vec3; + template struct Vec4; + template struct Quat; + + template + struct Mat4 + { + T m11, m12, m13, m14; + T m21, m22, m23, m24; + T m31, m32, m33, m34; + T m41, m42, m43, m44; + + constexpr Mat4() = default; + constexpr Mat4(T r11, T r12, T r13, T r14, + T r21, T r22, T r23, T r24, + T r31, T r32, T r33, T r34, + T r41, T r42, T r43, T r44); + constexpr Mat4(const T matrix[16]); + constexpr Mat4(const Mat4&) = default; + constexpr Mat4(Mat4&&) = default; + + constexpr Mat4& ApplyRotation(const Quat& rotation); + constexpr Mat4& ApplyScale(const Vec3& scale); + constexpr Mat4& ApplyTranslation(const Vec3& translation); + + constexpr bool ApproxEqual(const Mat4& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr Mat4& Concatenate(const Mat4& matrix); + constexpr Mat4& ConcatenateTransform(const Mat4& matrix); + + constexpr Vec4 GetColumn(std::size_t column) const; + constexpr T GetDeterminant() const; + constexpr T GetDeterminantTransform() const; + constexpr bool GetInverse(Mat4* dest) const; + constexpr bool GetInverseTransform(Mat4* dest) const; + Quat GetRotation() const; + constexpr Vec4 GetRow(std::size_t row) const; + constexpr Vec3 GetScale() const; + constexpr Vec3 GetSquaredScale() const; + constexpr Vec3 GetTranslation() const; + constexpr void GetTransposed(Mat4* dest) const; + + constexpr bool HasNegativeScale() const; + constexpr bool HasScale() const; + + constexpr Mat4& Inverse(bool* succeeded = nullptr); + constexpr Mat4& InverseTransform(bool* succeeded = nullptr); + + constexpr bool IsTransformMatrix() const; + constexpr bool IsIdentity() const; + + constexpr Mat4& SetRotation(const Quat& rotation); + constexpr Mat4& SetScale(const Vec3& scale); + constexpr Mat4& SetTranslation(const Vec3& translation); + + std::string ToString() const; + + constexpr Vec2 Transform(const Vec2& vector, T z = 0.0, T w = 1.0) const; + constexpr Vec3 Transform(const Vec3& vector, T w = 1.0) const; + constexpr Vec4 Transform(const Vec4& vector) const; + + constexpr Mat4& Transpose(); + + constexpr T& operator()(std::size_t x, std::size_t y); + constexpr const T& operator()(std::size_t x, std::size_t y) const; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr Mat4& operator=(const Mat4&) = default; + constexpr Mat4& operator=(Mat4&&) = default; + + constexpr Mat4 operator*(const Mat4& matrix) const; + constexpr Vec2 operator*(const Vec2& vector) const; + constexpr Vec3 operator*(const Vec3& vector) const; + constexpr Vec4 operator*(const Vec4& vector) const; + constexpr Mat4 operator*(T scalar) const; + + constexpr Mat4& operator*=(const Mat4& matrix); + constexpr Mat4& operator*=(T scalar); + + constexpr bool operator==(const Mat4& mat) const; + constexpr bool operator!=(const Mat4& mat) const; + + static constexpr bool ApproxEqual(const Mat4& lhs, const Mat4& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr Mat4 Concatenate(const Mat4& left, const Mat4& right); + static constexpr Mat4 ConcatenateTransform(const Mat4& left, const Mat4& right); + static constexpr Mat4 Identity(); + static 
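+		// Convention note, inferred from Transform() above and Translate() below:
+		// this Mat4 uses row-vector semantics (v' = v * M) with the translation in
+		// the fourth row (m41, m42, m43). Minimal sketch:
+		//   Mat4f model = Mat4f::Translate(Vec3f(1.f, 2.f, 3.f));
+		//   Vec3f p = model.Transform(Vec3f::Zero()); // == (1, 2, 3); w defaults to 1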
constexpr Mat4 LookAt(const Vec3& eye, const Vec3& target, const Vec3& up = Vec3::Up()); + static constexpr Mat4 Ortho(T left, T right, T top, T bottom, T zNear = -1.0, T zFar = 1.0); + static Mat4 Perspective(RadianAngle angle, T ratio, T zNear, T zFar); + static constexpr Mat4 Rotate(const Quat& rotation); + static constexpr Mat4 Scale(const Vec3& scale); + static constexpr Mat4 Translate(const Vec3& translation); + static constexpr Mat4 Transform(const Vec3& translation, const Quat& rotation); + static constexpr Mat4 Transform(const Vec3& translation, const Quat& rotation, const Vec3& scale); + static constexpr Mat4 TransformInverse(const Vec3& translation, const Quat& rotation); + static constexpr Mat4 TransformInverse(const Vec3& translation, const Quat& rotation, const Vec3& scale); + static constexpr Mat4 Zero(); + + ~Mat4() = default; + }; + + using Mat4d = Mat4; + using Mat4f = Mat4; +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Mat4.inl b/runtime/Includes/Maths/Mat4.inl new file mode 100644 index 0000000..7f67930 --- /dev/null +++ b/runtime/Includes/Maths/Mat4.inl @@ -0,0 +1,879 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace Scop +{ + template + constexpr Mat4::Mat4(T r11, T r12, T r13, T r14, + T r21, T r22, T r23, T r24, + T r31, T r32, T r33, T r34, + T r41, T r42, T r43, T r44) : + m11(r11), m12(r12), m13(r13), m14(r14), + m21(r21), m22(r22), m23(r23), m24(r24), + m31(r31), m32(r32), m33(r33), m34(r34), + m41(r41), m42(r42), m43(r43), m44(r44) + {} + + template + constexpr Mat4::Mat4(const T matrix[16]) : + Mat4(matrix[ 0], matrix[ 1], matrix[ 2], matrix[ 3], + matrix[ 4], matrix[ 5], matrix[ 6], matrix[ 7], + matrix[ 8], matrix[ 9], matrix[10], matrix[11], + matrix[12], matrix[13], matrix[14], matrix[15]) + {} + + template + constexpr Mat4& Mat4::ApplyRotation(const Quat& rotation) + { + return Concatenate(Mat4::Rotate(rotation)); + } + + template + constexpr Mat4& Mat4::ApplyScale(const Vec3& scale) + { + m11 *= scale.x; + m12 *= scale.x; + m13 *= scale.x; + + m21 *= scale.y; + m22 *= scale.y; + m23 *= scale.y; + + m31 *= scale.z; + m32 *= scale.z; + m33 *= scale.z; + + return *this; + } + + template + constexpr Mat4& Mat4::ApplyTranslation(const Vec3& translation) + { + m41 += translation.x; + m42 += translation.y; + m43 += translation.z; + + return *this; + } + + template + constexpr bool Mat4::ApproxEqual(const Mat4& mat, T maxDifference) const + { + for(unsigned int i = 0; i < 16; ++i) + if(!NumberEquals((&m11)[i], (&mat.m11)[i], maxDifference)) + return false; + + return true; + } + + template + constexpr Mat4& Mat4::Concatenate(const Mat4& matrix) + { + return operator=(Mat4( + m11 * matrix.m11 + m12 * matrix.m21 + m13 * matrix.m31 + m14 * matrix.m41, + m11 * matrix.m12 + m12 * matrix.m22 + m13 * matrix.m32 + m14 * matrix.m42, + m11 * matrix.m13 + m12 * matrix.m23 + m13 * matrix.m33 + m14 * matrix.m43, + m11 * matrix.m14 + m12 * matrix.m24 + m13 * matrix.m34 + m14 * matrix.m44, + + m21 * matrix.m11 + m22 * matrix.m21 + m23 * matrix.m31 + m24 * matrix.m41, + m21 * matrix.m12 + m22 * matrix.m22 + m23 * matrix.m32 + m24 * matrix.m42, + m21 * matrix.m13 + m22 * matrix.m23 + m23 * matrix.m33 + m24 * matrix.m43, + m21 * matrix.m14 + m22 * matrix.m24 + m23 * matrix.m34 + m24 * matrix.m44, + + m31 * matrix.m11 + m32 * matrix.m21 + m33 * matrix.m31 + m34 * matrix.m41, + m31 * matrix.m12 + m32 * matrix.m22 + m33 * matrix.m32 + m34 * matrix.m42, + m31 * matrix.m13 + m32 * 
matrix.m23 + m33 * matrix.m33 + m34 * matrix.m43, + m31 * matrix.m14 + m32 * matrix.m24 + m33 * matrix.m34 + m34 * matrix.m44, + + m41 * matrix.m11 + m42 * matrix.m21 + m43 * matrix.m31 + m44 * matrix.m41, + m41 * matrix.m12 + m42 * matrix.m22 + m43 * matrix.m32 + m44 * matrix.m42, + m41 * matrix.m13 + m42 * matrix.m23 + m43 * matrix.m33 + m44 * matrix.m43, + m41 * matrix.m14 + m42 * matrix.m24 + m43 * matrix.m34 + m44 * matrix.m44 + )); + } + + template + constexpr Mat4& Mat4::ConcatenateTransform(const Mat4& matrix) + { + return operator=(Mat4( + m11*matrix.m11 + m12*matrix.m21 + m13*matrix.m31, + m11*matrix.m12 + m12*matrix.m22 + m13*matrix.m32, + m11*matrix.m13 + m12*matrix.m23 + m13*matrix.m33, + T(0.0), + + m21*matrix.m11 + m22*matrix.m21 + m23*matrix.m31, + m21*matrix.m12 + m22*matrix.m22 + m23*matrix.m32, + m21*matrix.m13 + m22*matrix.m23 + m23*matrix.m33, + T(0.0), + + m31*matrix.m11 + m32*matrix.m21 + m33*matrix.m31, + m31*matrix.m12 + m32*matrix.m22 + m33*matrix.m32, + m31*matrix.m13 + m32*matrix.m23 + m33*matrix.m33, + T(0.0), + + m41*matrix.m11 + m42*matrix.m21 + m43*matrix.m31 + matrix.m41, + m41*matrix.m12 + m42*matrix.m22 + m43*matrix.m32 + matrix.m42, + m41*matrix.m13 + m42*matrix.m23 + m43*matrix.m33 + matrix.m43, + T(1.0) + )); + } + + template + constexpr Vec4 Mat4::GetColumn(std::size_t column) const + { + Assert(column < 4, "column index out of range"); + const T* ptr = &m11 + column * 4; + return Vec4(ptr[0], ptr[1], ptr[2], ptr[3]); + } + + template + constexpr T Mat4::GetDeterminant() const + { + T A = m22*(m33*m44 - m43*m34) - m32*(m23*m44 - m43*m24) + m42*(m23*m34 - m33*m24); + T B = m12*(m33*m44 - m43*m34) - m32*(m13*m44 - m43*m14) + m42*(m13*m34 - m33*m14); + T C = m12*(m23*m44 - m43*m24) - m22*(m13*m44 - m43*m14) + m42*(m13*m24 - m23*m14); + T D = m12*(m23*m34 - m33*m24) - m22*(m13*m34 - m33*m14) + m32*(m13*m24 - m23*m14); + + return m11*A - m21*B + m31*C - m41*D; + } + + template + constexpr T Mat4::GetDeterminantTransform() const + { + T A = m22*m33 - m32*m23; + T B = m12*m33 - m32*m13; + T C = m12*m23 - m22*m13; + + return m11*A - m21*B + m31*C; + } + + template + constexpr bool Mat4::GetInverse(Mat4* dest) const + { + Assert(dest, "destination matrix must be valid"); + + T det = GetDeterminant(); + if(det == T(0.0)) + return false; + + // http://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + T inv[16]; + inv[0] = m22 * m33 * m44 - + m22 * m34 * m43 - + m32 * m23 * m44 + + m32 * m24 * m43 + + m42 * m23 * m34 - + m42 * m24 * m33; + + inv[1] = -m12 * m33 * m44 + + m12 * m34 * m43 + + m32 * m13 * m44 - + m32 * m14 * m43 - + m42 * m13 * m34 + + m42 * m14 * m33; + + inv[2] = m12 * m23 * m44 - + m12 * m24 * m43 - + m22 * m13 * m44 + + m22 * m14 * m43 + + m42 * m13 * m24 - + m42 * m14 * m23; + + inv[3] = -m12 * m23 * m34 + + m12 * m24 * m33 + + m22 * m13 * m34 - + m22 * m14 * m33 - + m32 * m13 * m24 + + m32 * m14 * m23; + + inv[4] = -m21 * m33 * m44 + + m21 * m34 * m43 + + m31 * m23 * m44 - + m31 * m24 * m43 - + m41 * m23 * m34 + + m41 * m24 * m33; + + inv[5] = m11 * m33 * m44 - + m11 * m34 * m43 - + m31 * m13 * m44 + + m31 * m14 * m43 + + m41 * m13 * m34 - + m41 * m14 * m33; + + inv[6] = -m11 * m23 * m44 + + m11 * m24 * m43 + + m21 * m13 * m44 - + m21 * m14 * m43 - + m41 * m13 * m24 + + m41 * m14 * m23; + + inv[7] = m11 * m23 * m34 - + m11 * m24 * m33 - + m21 * m13 * m34 + + m21 * m14 * m33 + + m31 * m13 * m24 - + m31 * m14 * m23; + + inv[8] = m21 * m32 * m44 - + m21 * m34 * m42 - + m31 * m22 * m44 + + m31 * m24 * m42 + + m41 * m22 * m34 - + m41 * 
m24 * m32; + + inv[9] = -m11 * m32 * m44 + + m11 * m34 * m42 + + m31 * m12 * m44 - + m31 * m14 * m42 - + m41 * m12 * m34 + + m41 * m14 * m32; + + inv[10] = m11 * m22 * m44 - + m11 * m24 * m42 - + m21 * m12 * m44 + + m21 * m14 * m42 + + m41 * m12 * m24 - + m41 * m14 * m22; + + inv[11] = -m11 * m22 * m34 + + m11 * m24 * m32 + + m21 * m12 * m34 - + m21 * m14 * m32 - + m31 * m12 * m24 + + m31 * m14 * m22; + + inv[12] = -m21 * m32 * m43 + + m21 * m33 * m42 + + m31 * m22 * m43 - + m31 * m23 * m42 - + m41 * m22 * m33 + + m41 * m23 * m32; + + inv[13] = m11 * m32 * m43 - + m11 * m33 * m42 - + m31 * m12 * m43 + + m31 * m13 * m42 + + m41 * m12 * m33 - + m41 * m13 * m32; + + inv[14] = -m11 * m22 * m43 + + m11 * m23 * m42 + + m21 * m12 * m43 - + m21 * m13 * m42 - + m41 * m12 * m23 + + m41 * m13 * m22; + + inv[15] = m11 * m22 * m33 - + m11 * m23 * m32 - + m21 * m12 * m33 + + m21 * m13 * m32 + + m31 * m12 * m23 - + m31 * m13 * m22; + + T invDet = T(1.0) / det; + for(unsigned int i = 0; i < 16; ++i) + inv[i] *= invDet; + + *dest = inv; + return true; + } + + template + constexpr bool Mat4::GetInverseTransform(Mat4* dest) const + { + Assert(dest, "destination matrix must be valid"); + + T det = GetDeterminantTransform(); + if(det == T(0.0)) + return false; + + + // http://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + T inv[16]; + inv[0] = m22 * m33 - + m32 * m23; + + inv[1] = -m12 * m33 + + m32 * m13; + + inv[2] = m12 * m23 - + m22 * m13; + + inv[3] = T(0.0); + + inv[4] = -m21 * m33 + + m31 * m23; + + inv[5] = m11 * m33 - + m31 * m13; + + inv[6] = -m11 * m23 + + m21 * m13; + + inv[7] = T(0.0); + + inv[8] = m21 * m32 - + m31 * m22; + + inv[9] = -m11 * m32 + + m31 * m12; + + inv[10] = m11 * m22 - + m21 * m12; + + inv[11] = T(0.0); + + inv[12] = -m21 * m32 * m43 + + m21 * m33 * m42 + + m31 * m22 * m43 - + m31 * m23 * m42 - + m41 * m22 * m33 + + m41 * m23 * m32; + + inv[13] = m11 * m32 * m43 - + m11 * m33 * m42 - + m31 * m12 * m43 + + m31 * m13 * m42 + + m41 * m12 * m33 - + m41 * m13 * m32; + + inv[14] = -m11 * m22 * m43 + + m11 * m23 * m42 + + m21 * m12 * m43 - + m21 * m13 * m42 - + m41 * m12 * m23 + + m41 * m13 * m22; + + T invDet = T(1.0) / det; + for(unsigned int i = 0; i < 16; ++i) + inv[i] *= invDet; + + inv[15] = T(1.0); + + *dest = inv; + return true; + } + + template + Quat Mat4::GetRotation() const + { + // http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuat/ + Quat quat; + + T trace = m11 + m22 + m33; + if(trace > T(0.0)) + { + T s = T(0.5) / std::sqrt(trace + T(1.0)); + quat.w = T(0.25) / s; + quat.x = (m23 - m32) * s; + quat.y = (m31 - m13) * s; + quat.z = (m12 - m21) * s; + } + else + { + if(m11 > m22 && m11 > m33) + { + T s = T(2.0) * std::sqrt(T(1.0) + m11 - m22 - m33); + + quat.w = (m23 - m32) / s; + quat.x = T(0.25) * s; + quat.y = (m21 + m12) / s; + quat.z = (m31 + m13) / s; + } + else if(m22 > m33) + { + T s = T(2.0) * std::sqrt(T(1.0) + m22 - m11 - m33); + + quat.w = (m31 - m13) / s; + quat.x = (m21 + m12) / s; + quat.y = T(0.25) * s; + quat.z = (m32 + m23) / s; + } + else + { + T s = T(2.0) * std::sqrt(T(1.0) + m33 - m11 - m22); + + quat.w = (m12 - m21) / s; + quat.x = (m31 + m13) / s; + quat.y = (m32 + m23) / s; + quat.z = T(0.25) * s; + } + } + + return quat; + } + + template + constexpr Vec4 Mat4::GetRow(std::size_t row) const + { + Assert(row < 4, "row index out of range"); + + const T* ptr = &m11; + return Vec4(ptr[row], ptr[row+4], ptr[row+8], ptr[row+12]); + } + + template + constexpr Vec3 Mat4::GetScale() const + { + Vec3 squaredScale = 
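+		// The scale factors are the lengths of the rows of the upper-left 3x3 block:
+		// GetSquaredScale() below sums the squared entries per row, so a pure
+		// rotation (unit-length rows) yields a scale of (1, 1, 1).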
GetSquaredScale(); + return Vec3(std::sqrt(squaredScale.x), std::sqrt(squaredScale.y), std::sqrt(squaredScale.z)); + } + + template + constexpr Vec3 Mat4::GetSquaredScale() const + { + return Vec3(m11 * m11 + m12 * m12 + m13 * m13, + m21 * m21 + m22 * m22 + m23 * m23, + m31 * m31 + m32 * m32 + m33 * m33); + } + + template + constexpr Vec3 Mat4::GetTranslation() const + { + return Vec3(m41, m42, m43); + } + + template + constexpr void Mat4::GetTransposed(Mat4* dest) const + { + (*dest) = Mat4f( + m11, m21, m31, m41, + m12, m22, m32, m42, + m13, m23, m33, m43, + m14, m24, m34, m44 + ); + } + + template + constexpr bool Mat4::HasNegativeScale() const + { + return GetDeterminant() < T(0.0); + } + + template + constexpr bool Mat4::HasScale() const + { + T t = m11*m11 + m21*m21 + m31*m31; + if(!NumberEquals(t, T(1.0))) + return true; + + t = m12*m12 + m22*m22 + m32*m32; + if(!NumberEquals(t, T(1.0))) + return true; + + t = m13*m13 + m23*m23 + m33*m33; + if(!NumberEquals(t, T(1.0))) + return true; + + return false; + } + + template + constexpr Mat4& Mat4::Inverse(bool* succeeded) + { + bool result = GetInverse(this); + if(succeeded) + *succeeded = result; + + return *this; + } + + template + constexpr Mat4& Mat4::InverseTransform(bool* succeeded) + { + bool result = GetInverseTransform(this); + if(succeeded) + *succeeded = result; + + return *this; + } + + template + constexpr bool Mat4::IsTransformMatrix() const + { + return NumberEquals(m14, T(0.0)) && NumberEquals(m24, T(0.0)) && NumberEquals(m34, T(0.0)) && NumberEquals(m44, T(1.0)); + } + + template + constexpr bool Mat4::IsIdentity() const + { + return (NumberEquals(m11, T(1.0)) && NumberEquals(m12, T(0.0)) && NumberEquals(m13, T(0.0)) && NumberEquals(m14, T(0.0)) && + NumberEquals(m21, T(0.0)) && NumberEquals(m22, T(1.0)) && NumberEquals(m23, T(0.0)) && NumberEquals(m24, T(0.0)) && + NumberEquals(m31, T(0.0)) && NumberEquals(m32, T(0.0)) && NumberEquals(m33, T(1.0)) && NumberEquals(m34, T(0.0)) && + NumberEquals(m41, T(0.0)) && NumberEquals(m42, T(0.0)) && NumberEquals(m43, T(0.0)) && NumberEquals(m44, T(1.0))); + } + + template + constexpr Mat4& Mat4::SetRotation(const Quat& rotation) + { + T qw = rotation.w; + T qx = rotation.x; + T qy = rotation.y; + T qz = rotation.z; + + T qx2 = qx * qx; + T qy2 = qy * qy; + T qz2 = qz * qz; + + m11 = T(1.0) - T(2.0) * qy2 - T(2.0) * qz2; + m21 = T(2.0) * qx * qy - T(2.0) * qz * qw; + m31 = T(2.0) * qx * qz + T(2.0) * qy * qw; + + m12 = T(2.0) * qx * qy + T(2.0) * qz * qw; + m22 = T(1.0) - T(2.0) * qx2 - T(2.0) * qz2; + m32 = T(2.0) * qy * qz - T(2.0) * qx * qw; + + m13 = T(2.0) * qx * qz - T(2.0) * qy * qw; + m23 = T(2.0) * qy * qz + T(2.0) * qx * qw; + m33 = T(1.0) - T(2.0) * qx2 - T(2.0) * qy2; + + return *this; + } + + template + constexpr Mat4& Mat4::SetScale(const Vec3& scale) + { + m11 = scale.x; + m22 = scale.y; + m33 = scale.z; + + return *this; + } + + template + constexpr Mat4& Mat4::SetTranslation(const Vec3& translation) + { + m41 = translation.x; + m42 = translation.y; + m43 = translation.z; + + return *this; + } + + template + std::string Mat4::ToString() const + { + std::ostringstream ss; + ss << *this; + + return ss.str(); + } + + template + constexpr Vec2 Mat4::Transform(const Vec2& vector, T z, T w) const + { + return Vec2(m11 * vector.x + m21 * vector.y + m31 * z + m41 * w, + m12 * vector.x + m22 * vector.y + m32 * z + m42 * w); + } + + template + constexpr Vec3 Mat4::Transform(const Vec3& vector, T w) const + { + return Vec3(m11 * vector.x + m21 * vector.y + m31 * vector.z + m41 * 
w, + m12 * vector.x + m22 * vector.y + m32 * vector.z + m42 * w, + m13 * vector.x + m23 * vector.y + m33 * vector.z + m43 * w); + } + + template + constexpr Vec4 Mat4::Transform(const Vec4& vector) const + { + return Vec4(m11 * vector.x + m21 * vector.y + m31 * vector.z + m41 * vector.w, + m12 * vector.x + m22 * vector.y + m32 * vector.z + m42 * vector.w, + m13 * vector.x + m23 * vector.y + m33 * vector.z + m43 * vector.w, + m14 * vector.x + m24 * vector.y + m34 * vector.z + m44 * vector.w); + } + + template + constexpr Mat4& Mat4::Transpose() + { + std::swap(m12, m21); + std::swap(m13, m31); + std::swap(m14, m41); + std::swap(m23, m32); + std::swap(m24, m42); + std::swap(m34, m43); + + return *this; + } + + template + constexpr T& Mat4::operator()(std::size_t x, std::size_t y) + { + Assert(x <= 3, "index out of range"); + Assert(y <= 3, "index out of range"); + + return (&m11)[y*4 + x]; + } + + template + constexpr const T& Mat4::operator()(std::size_t x, std::size_t y) const + { + Assert(x <= 3, "index out of range"); + Assert(y <= 3, "index out of range"); + + return (&m11)[y*4+x]; + } + + template + constexpr T& Mat4::operator[](std::size_t i) + { + Assert(i <= 16, "index out of range"); + + return (&m11)[i]; + } + + template + constexpr const T& Mat4::operator[](std::size_t i) const + { + Assert(i <= 16, "index out of range"); + + return (&m11)[i]; + } + + template + constexpr Mat4 Mat4::operator*(const Mat4& matrix) const + { + Mat4 result(*this); + return result.Concatenate(matrix); + } + + template + constexpr Vec2 Mat4::operator*(const Vec2& vector) const + { + return Transform(vector); + } + + template + constexpr Vec3 Mat4::operator*(const Vec3& vector) const + { + return Transform(vector); + } + + template + constexpr Vec4 Mat4::operator*(const Vec4& vector) const + { + return Transform(vector); + } + + template + constexpr Mat4 Mat4::operator*(T scalar) const + { + Mat4 mat; + for(unsigned int i = 0; i < 16; ++i) + mat[i] = (&m11)[i] * scalar; + + return mat; + } + + template + constexpr Mat4& Mat4::operator*=(const Mat4& matrix) + { + Concatenate(matrix); + + return *this; + } + + template + constexpr Mat4& Mat4::operator*=(T scalar) + { + for(unsigned int i = 0; i < 16; ++i) + (&m11)[i] *= scalar; + + return *this; + } + + template + constexpr bool Mat4::operator==(const Mat4& mat) const + { + for(unsigned int i = 0; i < 16; ++i) + if((&m11)[i] != (&mat.m11)[i]) + return false; + + return true; + } + + template + constexpr bool Mat4::operator!=(const Mat4& mat) const + { + return !operator==(mat); + } + + template + constexpr bool Mat4::ApproxEqual(const Mat4& lhs, const Mat4& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Mat4 Mat4::Concatenate(const Mat4& left, const Mat4& right) + { + Mat4 matrix(left); // Copy of left-hand side matrix + matrix.Concatenate(right); // Concatenation with right-hand side + + return matrix; + } + + template + constexpr Mat4 Mat4::ConcatenateTransform(const Mat4& left, const Mat4& right) + { + Mat4 matrix(left); // Copy of left-hand side matrix + matrix.ConcatenateTransform(right); // Affine concatenation with right-hand side + + return matrix; + } + + template + constexpr Mat4 Mat4::Identity() + { + return Mat4( + T(1.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(1.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::LookAt(const Vec3& eye, const Vec3& target, const Vec3& up) + { + Vec3 f = Vec3::Normalize(target - 
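+		// Orientation sketch: LookAt() builds a right-handed view looking down -Z
+		// (f ends up negated in the third column), and Perspective() further below
+		// maps depth to [0, 1] with a negative m22, i.e. Vulkan-style clip space
+		// (Y flipped, zero-to-one depth) rather than OpenGL's [-1, 1] cube.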
eye); + Vec3 s = Vec3::Normalize(f.CrossProduct(up)); + Vec3 u = s.CrossProduct(f); + + return Mat4( + s.x, u.x, -f.x, T(0.0), + s.y, u.y, -f.y, T(0.0), + s.z, u.z, -f.z, T(0.0), + -s.DotProduct(eye), -u.DotProduct(eye), f.DotProduct(eye), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::Ortho(T left, T right, T top, T bottom, T zNear, T zFar) + { + // http://msdn.microsoft.com/en-us/library/windows/desktop/bb204942(v=vs.85).aspx + return Mat4( + T(2.0) / (right - left), T(0.0), T(0.0), T(0.0), + T(0.0), T(2.0) / (top - bottom), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0) / (zNear - zFar), T(0.0), + (left + right) / (left - right), (top + bottom) / (bottom - top), zNear / (zNear - zFar), T(1.0) + ); + } + + template + Mat4 Mat4::Perspective(RadianAngle angle, T ratio, T zNear, T zFar) + { + angle /= T(2.0); + + T yScale = angle.GetTan(); + + return Mat4( + T(1.0) / (ratio * yScale), T(0.0), T(0.0), T(0.0), + T(0.0), T(-1.0) / (yScale), T(0.0), T(0.0), + T(0.0), T(0.0), zFar / (zNear - zFar), T(-1.0), + T(0.0), T(0.0), -(zNear * zFar) / (zFar - zNear), T(0.0) + ); + } + + template + constexpr Mat4 Mat4::Rotate(const Quat& rotation) + { + Mat4 matrix = Mat4::Identity(); + matrix.SetRotation(rotation); + + return matrix; + } + + template + constexpr Mat4 Mat4::Scale(const Vec3& scale) + { + return Mat4( + scale.x, T(0.0), T(0.0), T(0.0), + T(0.0), scale.y, T(0.0), T(0.0), + T(0.0), T(0.0), scale.z, T(0.0), + T(0.0), T(0.0), T(0.0), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::Translate(const Vec3& translation) + { + return Mat4( + T(1.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(1.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0), T(0.0), + translation.x, translation.y, translation.z, T(1.0) + ); + } + + template + constexpr Mat4 Mat4::Transform(const Vec3& translation, const Quat& rotation) + { + Mat4 mat = Mat4f::Identity(); + mat.SetRotation(rotation); + mat.SetTranslation(translation); + + return mat; + } + + template + constexpr Mat4 Mat4::Transform(const Vec3& translation, const Quat& rotation, const Vec3& scale) + { + Mat4 mat = Transform(translation, rotation); + mat.ApplyScale(scale); + + return mat; + } + + template + constexpr Mat4 Mat4::TransformInverse(const Vec3& translation, const Quat& rotation) + { + // A view matrix must apply an inverse transformation of the 'world' matrix + Quat invRot = rotation.GetConjugate(); // Inverse of the rotation + + return Transform(-(invRot * translation), invRot); + } + + template + constexpr Mat4 Mat4::TransformInverse(const Vec3& translation, const Quat& rotation, const Vec3& scale) + { + return TransformInverse(translation, rotation).ApplyScale(T(1.0) / scale); + } + + template + constexpr Mat4 Mat4::Zero() + { + return Mat4( + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0) + ); + } + + template + std::ostream& operator<<(std::ostream& out, const Mat4& matrix) + { + return out << "Mat4(" << matrix.m11 << ", " << matrix.m12 << ", " << matrix.m13 << ", " << matrix.m14 << ",\n" + << " " << matrix.m21 << ", " << matrix.m22 << ", " << matrix.m23 << ", " << matrix.m24 << ",\n" + << " " << matrix.m31 << ", " << matrix.m32 << ", " << matrix.m33 << ", " << matrix.m34 << ",\n" + << " " << matrix.m41 << ", " << matrix.m42 << ", " << matrix.m43 << ", " << matrix.m44 << ')'; + } + + template + constexpr Mat4 operator*(T scale, const Mat4& matrix) + { + return matrix * scale; + } +} diff --git a/runtime/Includes/Maths/MathsUtils.h b/runtime/Includes/Maths/MathsUtils.h new 
file mode 100644
index 0000000..c17d0da
--- /dev/null
+++ b/runtime/Includes/Maths/MathsUtils.h
@@ -0,0 +1,26 @@
+#ifndef __SCOP_MATHS_UTILS__
+#define __SCOP_MATHS_UTILS__
+
+#include
+
+namespace Scop
+{
+	template<typename T>
+	[[nodiscard]] constexpr T Mod(T x, T y) noexcept;
+
+	template<typename T>
+	[[nodiscard]] constexpr T DegreeToRadian(T degrees) noexcept;
+
+	template<typename T>
+	[[nodiscard]] constexpr T RadianToDegree(T radians) noexcept;
+
+	template<typename T>
+	[[nodiscard]] constexpr T Clamp(T value, T min, T max) noexcept;
+
+	template<typename T, typename T2>
+	[[nodiscard]] constexpr T Lerp(const T& from, const T& to, const T2& interpolation) noexcept;
+}
+
+#include
+
+#endif
diff --git a/runtime/Includes/Maths/MathsUtils.inl b/runtime/Includes/Maths/MathsUtils.inl
new file mode 100644
index 0000000..d47a421
--- /dev/null
+++ b/runtime/Includes/Maths/MathsUtils.inl
@@ -0,0 +1,47 @@
+#pragma once
+#include
+
+#include
+#include
+
+#include
+
+namespace Scop
+{
+	template<typename T>
+	[[nodiscard]] constexpr T Mod(T x, T y) noexcept
+	{
+		if constexpr(std::is_floating_point_v<T>)
+		{
+			// std::fmod is not constexpr before C++23, so constant evaluation falls
+			// back to a truncated-quotient remainder (cast target assumed here)
+			if(std::is_constant_evaluated())
+				return x - static_cast<long long>(x / y) * y;
+			else
+				return std::fmod(x, y);
+		}
+		else
+			return x % y;
+	}
+
+	template<typename T>
+	[[nodiscard]] constexpr T DegreeToRadian(T degrees) noexcept
+	{
+		return degrees * (Pi<T>() / T(180.0));
+	}
+
+	template<typename T>
+	[[nodiscard]] constexpr T RadianToDegree(T radians) noexcept
+	{
+		return radians * (T(180.0) / Pi<T>());
+	}
+
+	template<typename T>
+	[[nodiscard]] constexpr T Clamp(T value, T min, T max) noexcept
+	{
+		return std::max(std::min(value, max), min);
+	}
+
+	template<typename T, typename T2>
+	[[nodiscard]] constexpr T Lerp(const T& from, const T& to, const T2& interpolation) noexcept
+	{
+		return static_cast<T>(from + interpolation * (to - from));
+	}
+}
diff --git a/runtime/Includes/Maths/Quaternions.h b/runtime/Includes/Maths/Quaternions.h
new file mode 100644
index 0000000..5937268
--- /dev/null
+++ b/runtime/Includes/Maths/Quaternions.h
@@ -0,0 +1,91 @@
+#ifndef __SCOP_QUATERNIONS__
+#define __SCOP_QUATERNIONS__
+
+#include
+#include
+
+namespace Scop
+{
+	template<typename T>
+	struct Quat
+	{
+		T w, x, y, z;
+
+		constexpr Quat() = default;
+		constexpr Quat(T W, T X, T Y, T Z);
+		template<AngleUnit Unit> Quat(const Angle<Unit, T>& angle);
+		Quat(const EulerAngles<T>& angles);
+		constexpr Quat(RadianAngle<T> angle, const Vec3<T>& axis);
+		constexpr Quat(const T quat[4]);
+		template<typename U> constexpr explicit Quat(const Quat<U>& quat);
+		constexpr Quat(const Quat&) = default;
+		constexpr Quat(Quat&&) = default;
+		~Quat() = default;
+
+		RadianAngle<T> AngleBetween(const Quat& vec) const;
+		constexpr bool ApproxEqual(const Quat& quat, T maxDifference = std::numeric_limits<T>::epsilon()) const;
+
+		Quat& ComputeW();
+		constexpr Quat& Conjugate();
+
+		constexpr T DotProduct(const Quat& vec) const;
+
+		constexpr Quat GetConjugate() const;
+		Quat GetInverse() const;
+		Quat GetNormal(T* length = nullptr) const;
+
+		Quat& Inverse();
+
+		T Magnitude() const;
+
+		Quat& Normalize(T* length = nullptr);
+
+		constexpr T SquaredMagnitude() const;
+
+		RadianAngle<T> To2DAngle() const;
+		EulerAngles<T> ToEulerAngles() const;
+		std::string ToString() const;
+
+		constexpr Quat& operator=(const Quat& quat) = default;
+		constexpr Quat& operator=(Quat&&) = default;
+
+		constexpr Quat operator+(const Quat& quat) const;
+		constexpr Quat operator*(const Quat& quat) const;
+		constexpr Vec3<T> operator*(const Vec3<T>& vec) const;
+		constexpr Quat operator*(T scale) const;
+		constexpr Quat operator/(const Quat& quat) const;
+
+		constexpr Quat& operator+=(const Quat& quat);
+		constexpr Quat& operator*=(const Quat& quat);
+		constexpr Quat& operator*=(T 
scale); + constexpr Quat& operator/=(const Quat& quat); + + constexpr bool operator==(const Quat& quat) const; + constexpr bool operator!=(const Quat& quat) const; + constexpr bool operator<(const Quat& quat) const; + constexpr bool operator<=(const Quat& quat) const; + constexpr bool operator>(const Quat& quat) const; + constexpr bool operator>=(const Quat& quat) const; + + static RadianAngle AngleBetween(const Quat& lhs, const Quat& rhs); + static constexpr bool ApproxEqual(const Quat& lhs, const Quat& rhs, T maxDifference = std::numeric_limits::epsilon()); + static constexpr Quat Identity(); + static constexpr Quat Lerp(const Quat& from, const Quat& to, T interpolation); + static Quat LookAt(const Vec3& forward, const Vec3& up); + static Quat Normalize(const Quat& quat, T* length = nullptr); + static Quat RotationBetween(const Vec3& from, const Vec3& to); + static Quat RotateTowards(const Quat& from, const Quat& to, RadianAngle maxRotation); + static Quat Mirror(Quat quat, const Vec3& axis); + static Quat Slerp(const Quat& from, const Quat& to, T interpolation); + static constexpr Quat Zero(); + }; + + using Quatd = Quat; + using Quatf = Quat; + + template std::ostream& operator<<(std::ostream& out, const Quat& quat); +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Quaternions.inl b/runtime/Includes/Maths/Quaternions.inl new file mode 100644 index 0000000..bea9a95 --- /dev/null +++ b/runtime/Includes/Maths/Quaternions.inl @@ -0,0 +1,508 @@ +#pragma once +#include + +namespace Scop +{ + template + constexpr Quat::Quat(T W, T X, T Y, T Z) : w(W), x(X), y(Y), z(Z) + {} + + template + template + Quat::Quat(const Angle& angle) : Quat(angle.ToQuat()) + {} + + template + Quat::Quat(const EulerAngles& angles) : Quat(angles.ToQuat()) + {} + + template + constexpr Quat::Quat(RadianAngle angle, const Vec3& axis) + { + angle /= T(2.0); + + Vec3 normalizedAxis = axis.GetNormal(); + + auto sincos = angle.GetSinCos(); + + w = sincos.second; + x = normalizedAxis.x * sincos.first; + y = normalizedAxis.y * sincos.first; + z = normalizedAxis.z * sincos.first; + + Normalize(); + } + + template + constexpr Quat::Quat(const T quat[4]) : w(quat[0]), x(quat[1]), y(quat[2]), z(quat[3]) + {} + + template + template + constexpr Quat::Quat(const Quat& quat) : w(static_cast(quat.w)), x(static_cast(quat.x)), y(static_cast(quat.y)), z(static_cast(quat.z)) + {} + + template + RadianAngle Quat::AngleBetween(const Quat& quat) const + { + T alpha = Vec3::DotProduct(Vec3(x, y, z), Vec3(quat.x, quat.y, quat.z)); + return std::acos(Scop::Clamp(alpha, T(-1.0), T(1.0))); + } + + template + constexpr bool Quat::ApproxEqual(const Quat& quat, T maxDifference) const + { + return NumberEquals(w, quat.w, maxDifference) && + NumberEquals(x, quat.x, maxDifference) && + NumberEquals(y, quat.y, maxDifference) && + NumberEquals(z, quat.z, maxDifference); + } + + template + Quat& Quat::ComputeW() + { + T t = T(1.0) - SquaredMagnitude(); + + if(t < T(0.0)) + w = T(0.0); + else + w = -std::sqrt(t); + + return *this; + } + + template + constexpr Quat& Quat::Conjugate() + { + x = -x; + y = -y; + z = -z; + return *this; + } + + template + constexpr T Quat::DotProduct(const Quat& quat) const + { + return w * quat.w + x * quat.x + y * quat.y + z * quat.z; + } + + template + constexpr Quat Quat::GetConjugate() const + { + Quat quat(*this); + quat.Conjugate(); + return quat; + } + + template + Quat Quat::GetInverse() const + { + Quat quat(*this); + quat.Inverse(); + return quat; + } + + template + Quat Quat::GetNormal(T* length) 
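+	// Identity worth noting for the functions around here: q multiplied by its
+	// conjugate has scalar part |q|^2 and zero vector part, so for a normalized
+	// quaternion GetConjugate() already equals GetInverse(). Rotation sketch,
+	// assuming the axis-angle constructor declared in Quaternions.h:
+	//   Quatf yaw(RadianAngle<float>(HalfPi<float>()), Vec3f::Up());
+	//   Vec3f rotated = yaw * Vec3f::Forward(); // see operator*(const Vec3&)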
const + { + Quat quat(*this); + quat.Normalize(length); + return quat; + } + + template + Quat& Quat::Inverse() + { + T norm = SquaredMagnitude(); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / std::sqrt(norm); + + w *= invNorm; + x *= -invNorm; + y *= -invNorm; + z *= -invNorm; + } + + return *this; + } + + template + T Quat::Magnitude() const + { + return std::sqrt(SquaredMagnitude()); + } + + template + Quat& Quat::Normalize(T* length) + { + T norm = std::sqrt(SquaredMagnitude()); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + w *= invNorm; + x *= invNorm; + y *= invNorm; + z *= invNorm; + } + + if(length) + *length = norm; + + return *this; + } + + template + constexpr T Quat::SquaredMagnitude() const + { + return w * w + x * x + y * y + z * z; + } + + template + RadianAngle Quat::To2DAngle() const + { + T siny_cosp = T(2.0) * (w * z + x * y); + T cosy_cosp = T(1.0) - T(2.0) * (y * y + z * z); + + return std::atan2(siny_cosp, cosy_cosp); + } + + template + EulerAngles Quat::ToEulerAngles() const + { + T test = x * y + z * w; + if(test > T(0.499)) + // singularity at north pole + return EulerAngles(DegreeAngle(T(0.0)), RadianAngle(T(2.0) * std::atan2(x, w)), DegreeAngle(T(90.0))); + + if(test < T(-0.499)) + // singularity at south pole + return EulerAngles(DegreeAngle(T(0.0)), RadianAngle(T(-2.0) * std::atan2(x, w)), DegreeAngle(T(-90.0))); + + return EulerAngles(RadianAngle(std::atan2(T(2.0) * x * w - T(2.0) * y * z, T(1.0) - T(2.0) * x * x - T(2.0) * z * z)), + RadianAngle(std::atan2(T(2.0) * y * w - T(2.0) * x * z, T(1.0) - T(2.0) * y * y - T(2.0) * z * z)), + RadianAngle(std::asin(T(2.0) * test))); + } + + template + std::string Quat::ToString() const + { + std::ostringstream ss; + ss << *this; + + return ss.str(); + } + + template + constexpr Quat Quat::operator+(const Quat& quat) const + { + Quat result; + result.w = w + quat.w; + result.x = x + quat.x; + result.y = y + quat.y; + result.z = z + quat.z; + + return result; + } + + template + constexpr Quat Quat::operator*(const Quat& quat) const + { + Quat result; + result.w = w * quat.w - x * quat.x - y * quat.y - z * quat.z; + result.x = w * quat.x + x * quat.w + y * quat.z - z * quat.y; + result.y = w * quat.y + y * quat.w + z * quat.x - x * quat.z; + result.z = w * quat.z + z * quat.w + x * quat.y - y * quat.x; + + return result; + } + + template + constexpr Vec3 Quat::operator*(const Vec3& vec) const + { + Vec3 quatVec(x, y, z); + Vec3 uv = quatVec.CrossProduct(vec); + Vec3 uuv = quatVec.CrossProduct(uv); + uv *= T(2.0) * w; + uuv *= T(2.0); + + return vec + uv + uuv; + } + + template + constexpr Quat Quat::operator*(T scale) const + { + return Quat(w * scale, + x * scale, + y * scale, + z * scale); + } + + template + constexpr Quat Quat::operator/(const Quat& quat) const + { + return quat.GetConjugate() * (*this); + } + + template + constexpr Quat& Quat::operator+=(const Quat& quat) + { + return operator=(operator+(quat)); + } + + template + constexpr Quat& Quat::operator*=(const Quat& quat) + { + return operator=(operator*(quat)); + } + + template + constexpr Quat& Quat::operator*=(T scale) + { + return operator=(operator*(scale)); + } + + template + constexpr Quat& Quat::operator/=(const Quat& quat) + { + return operator=(operator/(quat)); + } + + template + constexpr bool Quat::operator==(const Quat& quat) const + { + return w == quat.w && x == quat.x && y == quat.y && z == quat.z; + } + + template + constexpr bool Quat::operator!=(const Quat& quat) const + { + return !operator==(quat); + } + + template + constexpr 
bool Quat<T>::operator<(const Quat& quat) const
+	{
+		if(w != quat.w)
+			return w < quat.w;
+
+		if(x != quat.x)
+			return x < quat.x;
+
+		if(y != quat.y)
+			return y < quat.y;
+
+		return z < quat.z;
+	}
+
+	template<typename T>
+	constexpr bool Quat<T>::operator<=(const Quat& quat) const
+	{
+		if(w != quat.w)
+			return w < quat.w;
+
+		if(x != quat.x)
+			return x < quat.x;
+
+		if(y != quat.y)
+			return y < quat.y;
+
+		return z <= quat.z;
+	}
+
+	template<typename T>
+	constexpr bool Quat<T>::operator>(const Quat& quat) const
+	{
+		if(w != quat.w)
+			return w > quat.w;
+
+		if(x != quat.x)
+			return x > quat.x;
+
+		if(y != quat.y)
+			return y > quat.y;
+
+		return z > quat.z;
+	}
+
+	template<typename T>
+	constexpr bool Quat<T>::operator>=(const Quat& quat) const
+	{
+		if(w != quat.w)
+			return w > quat.w;
+
+		if(x != quat.x)
+			return x > quat.x;
+
+		if(y != quat.y)
+			return y > quat.y;
+
+		return z >= quat.z;
+	}
+
+	template<typename T>
+	RadianAngle<T> Quat<T>::AngleBetween(const Quat& lhs, const Quat& rhs)
+	{
+		return lhs.AngleBetween(rhs);
+	}
+
+	template<typename T>
+	constexpr bool Quat<T>::ApproxEqual(const Quat& lhs, const Quat& rhs, T maxDifference)
+	{
+		return lhs.ApproxEqual(rhs, maxDifference);
+	}
+
+	template<typename T>
+	constexpr Quat<T> Quat<T>::Identity()
+	{
+		return Quat(1, 0, 0, 0);
+	}
+
+	template<typename T>
+	constexpr Quat<T> Quat<T>::Lerp(const Quat& from, const Quat& to, T interpolation)
+	{
+		Quat interpolated;
+		interpolated.w = Scop::Lerp(from.w, to.w, interpolation);
+		interpolated.x = Scop::Lerp(from.x, to.x, interpolation);
+		interpolated.y = Scop::Lerp(from.y, to.y, interpolation);
+		interpolated.z = Scop::Lerp(from.z, to.z, interpolation);
+
+		return interpolated;
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::LookAt(const Vec3<T>& forward, const Vec3<T>& up)
+	{
+		// From https://gamedev.stackexchange.com/questions/53129/quaternion-look-at-with-up-vector
+		Vec3<T> forward_w = Vec3<T>::Forward();
+		Vec3<T> axis = Vec3<T>::CrossProduct(forward, forward_w);
+		RadianAngle<T> angle = std::acos(Vec3<T>::DotProduct(forward, forward_w));
+
+		Vec3<T> third = Vec3<T>::CrossProduct(axis, forward_w);
+		if(Vec3<T>::DotProduct(third, forward) < 0)
+			angle = -angle;
+
+		Quat q1 = Quat(angle, axis);
+
+		Vec3<T> up_l = q1 * up;
+		Vec3<T> right = Vec3<T>::Normalize(Vec3<T>::CrossProduct(forward, up));
+		Vec3<T> up_w = Vec3<T>::Normalize(Vec3<T>::CrossProduct(right, forward));
+
+		Vec3<T> axis2 = Vec3<T>::CrossProduct(up_l, up_w);
+		RadianAngle<T> angle2 = std::acos(Vec3<T>::DotProduct(up_l, up_w));
+
+		Quat q2 = Quat(angle2, axis2);
+
+		return q2 * q1;
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::Normalize(const Quat& quat, T* length)
+	{
+		return quat.GetNormal(length);
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::RotationBetween(const Vec3<T>& from, const Vec3<T>& to)
+	{
+		T dot = from.DotProduct(to);
+		if(dot < T(-0.999999))
+		{
+			Vec3<T> crossProduct;
+			if(from.DotProduct(Vec3<T>::UnitX()) < T(0.999999))
+				crossProduct = Vec3<T>::UnitX().CrossProduct(from);
+			else
+				crossProduct = Vec3<T>::UnitY().CrossProduct(from);
+
+			crossProduct.Normalize();
+			return Quat(Pi<T>(), crossProduct);
+		}
+		else if(dot > T(0.999999))
+			return Quat::Identity();
+		else
+		{
+			T norm = std::sqrt(from.GetSquaredLength() * to.GetSquaredLength());
+			Vec3<T> crossProduct = from.CrossProduct(to);
+
+			return Quat(norm + dot, crossProduct.x, crossProduct.y, crossProduct.z).GetNormal();
+		}
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::RotateTowards(const Quat& from, const Quat& to, RadianAngle<T> maxRotation)
+	{
+		RadianAngle<T> rotationBetween = AngleBetween(from, to);
+		if(rotationBetween < maxRotation)
+			return to;
+
+		return Slerp(from, to, std::min(maxRotation.value / rotationBetween.value, T(1.0)));
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::Mirror(Quat quat, const Vec3<T>& axis)
+	{
+		T x = std::copysign(T(1.0), axis.x);
+		T y = std::copysign(T(1.0), axis.y);
+		T z = std::copysign(T(1.0), axis.z);
+
+		quat.x = y * z * quat.x;
+		quat.y = x * z * quat.y;
+		quat.z = x * y * quat.z;
+
+		return quat;
+	}
+
+	template<typename T>
+	Quat<T> Quat<T>::Slerp(const Quat& from, const Quat& to, T interpolation)
+	{
+		Quat q;
+
+		T cosOmega = from.DotProduct(to);
+		if(cosOmega < T(0.0))
+		{
+			// We invert everything
+			q = Quat(-to.w, -to.x, -to.y, -to.z);
+			cosOmega = -cosOmega;
+		}
+		else
+			q = Quat(to);
+
+		T k0, k1;
+		if(cosOmega > T(0.9999))
+		{
+			// Linear interpolation to avoid division by zero
+			k0 = T(1.0) - interpolation;
+			k1 = interpolation;
+		}
+		else
+		{
+			T sinOmega = std::sqrt(T(1.0) - cosOmega * cosOmega);
+			T omega = std::atan2(sinOmega, cosOmega);
+
+			// To avoid two divisions
+			sinOmega = T(1.0) / sinOmega;
+
+			k0 = std::sin((T(1.0) - interpolation) * omega) * sinOmega;
+			k1 = std::sin(interpolation * omega) * sinOmega;
+		}
+
+		Quat result(k0 * from.w, k0 * from.x, k0 * from.y, k0 * from.z);
+		return result += q * k1;
+	}
+
+	template<typename T>
+	constexpr Quat<T> Quat<T>::Zero()
+	{
+		return Quat(0, 0, 0, 0);
+	}
+
+	template<typename T>
+	std::ostream& operator<<(std::ostream& out, const Quat<T>& quat)
+	{
+		return out << "Quat(" << quat.w << " | " << quat.x << ", " << quat.y << ", " << quat.z << ')';
+	}
+}
diff --git a/runtime/Includes/Maths/Vec2.h b/runtime/Includes/Maths/Vec2.h
new file mode 100755
index 0000000..ec6f7a5
--- /dev/null
+++ b/runtime/Includes/Maths/Vec2.h
@@ -0,0 +1,116 @@
+#ifndef __SCOP_VEC2__
+#define __SCOP_VEC2__
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace Scop
+{
+	template<typename T> class Vec3;
+	template<typename T> class Vec4;
+
+	template<typename T>
+	struct Vec2
+	{
+		union { T x, r, s; };
+		union { T y, g, t; };
+
+		constexpr Vec2() = default;
+		constexpr Vec2(T X, T Y);
+		constexpr explicit Vec2(T scale);
+		template<typename U> constexpr explicit Vec2(const Vec2<U>& vec);
+		constexpr Vec2(const Vec2&) = default;
+		constexpr Vec2(Vec2&&) = default;
+		constexpr explicit Vec2(const Vec3<T>& vec);
+		constexpr explicit Vec2(const Vec4<T>& vec);
+
+		T AbsDotProduct(const Vec2& vec) const;
+		constexpr bool ApproxEqual(const Vec2& vec, T max_difference = std::numeric_limits<T>::epsilon()) const;
+
+		template<typename U = T> U Distance(const Vec2& vec) const;
+		constexpr T DotProduct(const Vec2& vec) const;
+
+		template<typename U = T> T GetLength() const;
+		Vec2 GetNormal(T* length = nullptr) const;
+		constexpr T GetSquaredLength() const;
+
+		constexpr Vec2& Maximize(const Vec2& vec);
+		constexpr Vec2& Minimize(const Vec2& vec);
+
+		Vec2& Normalize(T* length = nullptr);
+
+		constexpr T SquaredDistance(const Vec2& vec) const;
+
+		std::string ToString() const;
+
+		constexpr T& operator[](std::size_t i);
+		constexpr T operator[](std::size_t i) const;
+
+		constexpr const Vec2& operator+() const;
+		constexpr Vec2 operator-() const;
+
+		constexpr Vec2 operator+(const Vec2& vec) const;
+		constexpr Vec2 operator-(const Vec2& vec) const;
+		constexpr Vec2 operator*(const Vec2& vec) const;
+		constexpr Vec2 operator*(T scale) const;
+		constexpr Vec2 operator/(const Vec2& vec) const;
+		constexpr Vec2 operator/(T scale) const;
+		constexpr Vec2 operator%(const Vec2& vec) const;
+		constexpr Vec2 operator%(T mod) const;
+
+		constexpr Vec2& operator=(const Vec2&) = default;
+		constexpr Vec2& operator=(Vec2&&) = default;
+
+		constexpr Vec2& operator+=(const Vec2& vec);
+		constexpr Vec2& operator-=(const Vec2& vec);
+		constexpr Vec2& operator*=(const Vec2& vec);
+		constexpr 
Vec2& operator*=(T scale); + constexpr Vec2& operator/=(const Vec2& vec); + constexpr Vec2& operator/=(T scale); + constexpr Vec2& operator%=(const Vec2& vec); + constexpr Vec2& operator%=(T mod); + + constexpr bool operator==(const Vec2& vec) const; + constexpr bool operator!=(const Vec2& vec) const; + constexpr bool operator<(const Vec2& vec) const; + constexpr bool operator<=(const Vec2& vec) const; + constexpr bool operator>(const Vec2& vec) const; + constexpr bool operator>=(const Vec2& vec) const; + + static constexpr Vec2 Apply(T(*func)(T), const Vec2& vec); + static constexpr bool ApproxEqual(const Vec2& lhs, const Vec2& rhs, T max_difference = std::numeric_limits::epsilon()); + template static U Distance(const Vec2& vec1, const Vec2& vec2); + static constexpr T DotProduct(const Vec2& vec1, const Vec2& vec2); + static constexpr Vec2 Lerp(const Vec2& from, const Vec2& to, T interpolation); + static Vec2 Normalize(const Vec2& vec); + static constexpr Vec2 Unit(); + static constexpr Vec2 UnitX(); + static constexpr Vec2 UnitY(); + static constexpr Vec2 Zero(); + + ~Vec2() = default; + }; + + using Vec2d = Vec2; + using Vec2f = Vec2; + using Vec2i = Vec2; + using Vec2ui = Vec2; + using Vec2i32 = Vec2; + using Vec2i64 = Vec2; + using Vec2ui32 = Vec2; + using Vec2ui64 = Vec2; + + template std::ostream& operator<<(std::ostream& out, const Vec2& vec); + + template constexpr Vec2 operator*(T scale, const Vec2& vec); + template constexpr Vec2 operator/(T scale, const Vec2& vec); + template constexpr Vec2 operator%(T mod, const Vec2& vec); +} + +#include + +#endif // __AK_VEC2__ diff --git a/runtime/Includes/Maths/Vec2.inl b/runtime/Includes/Maths/Vec2.inl new file mode 100755 index 0000000..1671da8 --- /dev/null +++ b/runtime/Includes/Maths/Vec2.inl @@ -0,0 +1,388 @@ +#pragma once + +#include + +namespace Scop +{ + template + constexpr Vec2::Vec2(T X, T Y) : x(X), y(Y) {} + + template + constexpr Vec2::Vec2(T scale) : x(scale), y(scale) {} + + template + template + constexpr Vec2::Vec2(const Vec2& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)) {} + + template + constexpr Vec2::Vec2(const Vec3& vec) : x(vec.x), y(vec.y) {} + + template + constexpr Vec2::Vec2(const Vec4& vec) : x(vec.x), y(vec.y) {} + + template + T Vec2::AbsDotProduct(const Vec2& vec) const + { + return std::abs(x * vec.x) + std::abs(y * vec.y); + } + + template + constexpr bool Vec2::ApproxEqual(const Vec2& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference); + } + + template + template + U Vec2::Distance(const Vec2& vec) const + { + return static_cast(std::sqrt(SquaredDistance(vec))); + } + + template + constexpr T Vec2::DotProduct(const Vec2& vec) const + { + return x * vec.x + y * vec.y; + } + + template + template + T Vec2::GetLength() const + { + return static_cast(std::sqrt(static_cast(GetSquaredLength()))); + } + + template + Vec2 Vec2::GetNormal(T* length) const + { + Vec2 vec(*this); + vec.Normalize(length); + return vec; + } + + template + constexpr T Vec2::GetSquaredLength() const + { + return x * x + y * y; + } + + template + constexpr Vec2& Vec2::Maximize(const Vec2& vec) + { + if(vec.x > x) + x = vec.x; + if(vec.y > y) + y = vec.y; + return *this; + } + + template + constexpr Vec2& Vec2::Minimize(const Vec2& vec) + { + if(vec.x < x) + x = vec.x; + if(vec.y < y) + y = vec.y; + return *this; + } + + template + Vec2& Vec2::Normalize(T* length) + { + T norm = GetLength(); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + x *= invNorm; + y 
*= invNorm; + } + if(length) + *length = norm; + return *this; + } + + template + constexpr T Vec2::SquaredDistance(const Vec2& vec) const + { + return (*this - vec).GetSquaredLength(); + } + + template + std::string Vec2::ToString() const + { + return "Vec2(" + std::to_string(x) + ", " + std::to_string(y) + ')'; + } + + template + constexpr T& Vec2::operator[](std::size_t i) + { + Scop::Assert(i < 2, "index out of range"); + return *(&x + i); + } + + template + constexpr T Vec2::operator[](std::size_t i) const + { + Scop::Assert(i < 2, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec2& Vec2::operator+() const + { + return *this; + } + + template + constexpr Vec2 Vec2::operator-() const + { + return Vec2(-x, -y); + } + + template + constexpr Vec2 Vec2::operator+(const Vec2& vec) const + { + return Vec2(x + vec.x, y + vec.y); + } + + template + constexpr Vec2 Vec2::operator-(const Vec2& vec) const + { + return Vec2(x - vec.x, y - vec.y); + } + + template + constexpr Vec2 Vec2::operator*(const Vec2& vec) const + { + return Vec2(x * vec.x, y * vec.y); + } + + template + constexpr Vec2 Vec2::operator*(T scale) const + { + return Vec2(x * scale, y * scale); + } + + template + constexpr Vec2 Vec2::operator/(const Vec2& vec) const + { + return Vec2(x / vec.x, y / vec.y); + } + + template + constexpr Vec2 Vec2::operator/(T scale) const + { + return Vec2(x / scale, y / scale); + } + + template + constexpr Vec2 Vec2::operator%(const Vec2& vec) const + { + return Vec2(Mod(x, vec.x), Mod(y, vec.y)); + } + + template + constexpr Vec2 Vec2::operator%(T mod) const + { + return Vec2(Mod(x, mod), Mod(y, mod)); + } + + template + constexpr Vec2& Vec2::operator+=(const Vec2& vec) + { + x += vec.x; + y += vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator-=(const Vec2& vec) + { + x -= vec.x; + y -= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator*=(const Vec2& vec) + { + x *= vec.x; + y *= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator*=(T scale) + { + x *= scale; + y *= scale; + + return *this; + } + + template + constexpr Vec2& Vec2::operator/=(const Vec2& vec) + { + x /= vec.x; + y /= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator/=(T scale) + { + x /= scale; + y /= scale; + + return *this; + } + + template + constexpr Vec2& Vec2::operator%=(const Vec2& vec) + { + x = Mod(x, vec.x); + y = Mod(y, vec.y); + + return *this; + } + + template + constexpr Vec2& Vec2::operator%=(T value) + { + x = Mod(x, value); + y = Mod(y, value); + + return *this; + } + + template + constexpr bool Vec2::operator==(const Vec2& vec) const + { + return x == vec.x && y == vec.y; + } + + template + constexpr bool Vec2::operator!=(const Vec2& vec) const + { + return !operator==(vec); + } + + template + constexpr bool Vec2::operator<(const Vec2& vec) const + { + if (x != vec.x) + return x < vec.x; + + return y < vec.y; + } + + template + constexpr bool Vec2::operator<=(const Vec2& vec) const + { + if (x != vec.x) + return x < vec.x; + + return y <= vec.y; + } + + template + constexpr bool Vec2::operator>(const Vec2& vec) const + { + if (x != vec.x) + return x > vec.x; + + return y > vec.y; + } + + template + constexpr bool Vec2::operator>=(const Vec2& vec) const + { + if (x != vec.x) + return x > vec.x; + + return y >= vec.y; + } + + template + constexpr Vec2 Vec2::Apply(T(*func)(T), const Vec2& vec) + { + return Vec2(func(vec.x), func(vec.y)); + } + + template + constexpr bool 
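+	// The relational operators above are lexicographic (x first, then y), which is
+	// what makes Vec2 usable as a key in ordered containers, e.g. (with <map>):
+	//   std::map<Vec2i, int> cells;
+	//   cells[Vec2i(1, 2)] = 42;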
Vec2::ApproxEqual(const Vec2& lhs, const Vec2& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + template + U Vec2::Distance(const Vec2& vec1, const Vec2& vec2) + { + return vec1.Distance(vec2); + } + + template + constexpr T Vec2::DotProduct(const Vec2& vec1, const Vec2& vec2) + { + return vec1.DotProduct(vec2); + } + + template + Vec2 Vec2::Normalize(const Vec2& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec2 Vec2::Unit() + { + return Vec2(1, 1); + } + + template + constexpr Vec2 Vec2::UnitX() + { + return Vec2(1, 0); + } + + template + constexpr Vec2 Vec2::UnitY() + { + return Vec2(0, 1); + } + + template + constexpr Vec2 Vec2::Zero() + { + return Vec2(0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec2& vec) + { + return out << "Vec2(" << vec.x << ", " << vec.y << ')'; + } + + template + constexpr Vec2 operator*(T scale, const Vec2& vec) + { + return Vec2(scale * vec.x, scale * vec.y); + } + + template + constexpr Vec2 operator/(T scale, const Vec2& vec) + { + return Vec2(scale / vec.x, scale / vec.y); + } + + template + constexpr Vec2 operator%(T mod, const Vec2& vec) + { + return Vec2(Mod(mod, vec.x), Mod(mod, vec.y)); + } +} diff --git a/runtime/Includes/Maths/Vec3.h b/runtime/Includes/Maths/Vec3.h new file mode 100755 index 0000000..775431f --- /dev/null +++ b/runtime/Includes/Maths/Vec3.h @@ -0,0 +1,133 @@ +#ifndef __SCOP_VEC3__ +#define __SCOP_VEC3__ + +#include +#include +#include +#include + +#include + +namespace Scop +{ + template class Vec2; + template class Vec4; + + template + struct Vec3 + { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + + constexpr Vec3() = default; + constexpr Vec3(T X, T Y, T Z); + constexpr Vec3(T X, const Vec2& vec); + constexpr explicit Vec3(T scale); + constexpr Vec3(const Vec2& vec, T Z = 0.0); + template constexpr explicit Vec3(const Vec3& vec); + constexpr Vec3(const Vec3&) = default; + constexpr Vec3(Vec3&&) = default; + constexpr explicit Vec3(const Vec4& vec); + + T AbsDotProduct(const Vec3& vec) const; + constexpr bool ApproxEqual(const Vec3& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr Vec3 CrossProduct(const Vec3& vec) const; + + template U Distance(const Vec3& vec) const; + constexpr T DotProduct(const Vec3& vec) const; + + Vec3 GetAbs() const; + template U GetLength() const; + Vec3 GetNormal(T* length = nullptr) const; + constexpr T GetSquaredLength() const; + + constexpr Vec3& Maximize(const Vec3& vec); + constexpr Vec3& Minimize(const Vec3& vec); + + Vec3& Normalize(T* length = nullptr); + + constexpr T SquaredDistance(const Vec3& vec) const; + + std::string ToString() const; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr const Vec3& operator+() const; + constexpr Vec3 operator-() const; + + constexpr Vec3 operator+(const Vec3& vec) const; + constexpr Vec3 operator-(const Vec3& vec) const; + constexpr Vec3 operator*(const Vec3& vec) const; + constexpr Vec3 operator*(T scale) const; + constexpr Vec3 operator/(const Vec3& vec) const; + constexpr Vec3 operator/(T scale) const; + constexpr Vec3 operator%(const Vec3& vec) const; + constexpr Vec3 operator%(T mod) const; + + constexpr Vec3& operator=(const Vec3&) = default; + constexpr Vec3& operator=(Vec3&&) = default; + + constexpr Vec3& operator+=(const Vec3& vec); + constexpr Vec3& operator-=(const Vec3& vec); + constexpr Vec3& operator*=(const Vec3& vec); + constexpr Vec3& 
operator*=(T scale); + constexpr Vec3& operator/=(const Vec3& vec); + constexpr Vec3& operator/=(T scale); + constexpr Vec3& operator%=(const Vec3& vec); + constexpr Vec3& operator%=(T mod); + + constexpr bool operator==(const Vec3& vec) const; + constexpr bool operator!=(const Vec3& vec) const; + constexpr bool operator<(const Vec3& vec) const; + constexpr bool operator<=(const Vec3& vec) const; + constexpr bool operator>(const Vec3& vec) const; + constexpr bool operator>=(const Vec3& vec) const; + + static constexpr Vec3 Apply(T(*func)(T), const Vec3& vec); + static constexpr bool ApproxEqual(const Vec3& lhs, const Vec3& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr Vec3 Backward(); + static constexpr Vec3 Clamp(const Vec3& vec, const Vec3& min, const Vec3& max); + static constexpr Vec3 CrossProduct(const Vec3& vec1, const Vec3& vec2); + template static U Distance(const Vec3& vec1, const Vec3& vec2); + static constexpr T DotProduct(const Vec3& vec1, const Vec3& vec2); + static constexpr Vec3 Down(); + static constexpr Vec3 Forward(); + static constexpr Vec3 Left(); + static constexpr Vec3 Max(const Vec3& lhs, const Vec3& rhs); + static constexpr Vec3 Min(const Vec3& lhs, const Vec3& rhs); + static Vec3 Normalize(const Vec3& vec); + static constexpr Vec3 Right(); + static constexpr T SquaredDistance(const Vec3& vec1, const Vec3& vec2); + static constexpr Vec3 Unit(); + static constexpr Vec3 UnitX(); + static constexpr Vec3 UnitY(); + static constexpr Vec3 UnitZ(); + static constexpr Vec3 Up(); + static constexpr Vec3 Zero(); + + ~Vec3() = default; + }; + + using Vec3b = Vec3; + using Vec3d = Vec3; + using Vec3f = Vec3; + using Vec3i = Vec3; + using Vec3ui = Vec3; + using Vec3i32 = Vec3; + using Vec3i64 = Vec3; + using Vec3ui32 = Vec3; + using Vec3ui64 = Vec3; + + template std::ostream& operator<<(std::ostream& out, const Vec3& vec); + + template constexpr Vec3 operator*(T scale, const Vec3& vec); + template constexpr Vec3 operator/(T scale, const Vec3& vec); + template constexpr Vec3 operator%(T scale, const Vec3& vec); +} + +#include + +#endif // __AK_VEC3__ diff --git a/runtime/Includes/Maths/Vec3.inl b/runtime/Includes/Maths/Vec3.inl new file mode 100755 index 0000000..73c73ff --- /dev/null +++ b/runtime/Includes/Maths/Vec3.inl @@ -0,0 +1,509 @@ +#pragma once + +#include + +namespace Scop +{ + template + constexpr Vec3::Vec3(T X, T Y, T Z) : x(X), y(Y), z(Z) {} + + template + constexpr Vec3::Vec3(T X, const Vec2& vec) : x(X), y(vec.x), z(vec.y) {} + + template + constexpr Vec3::Vec3(T scale) : x(scale), y(scale), z(scale) {} + + template + constexpr Vec3::Vec3(const Vec2& vec, T Z) : x(vec.x), y(vec.y), z(Z) {} + + template + template + constexpr Vec3::Vec3(const Vec3& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)), z(static_cast(vec.z)) {} + + template + constexpr Vec3::Vec3(const Vec4& vec) : x(vec.x), y(vec.y), z(vec.z) {} + + template + T Vec3::AbsDotProduct(const Vec3& vec) const + { + return std::abs(x * vec.x) + std::abs(y * vec.y) + std::abs(z * vec.z); + } + + template + constexpr bool Vec3::ApproxEqual(const Vec3& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference) && NumberEquals(z, vec.z, maxDifference); + } + + template + constexpr Vec3 Vec3::CrossProduct(const Vec3& vec) const + { + return Vec3(y * vec.z - z * vec.y, z * vec.x - x * vec.z, x * vec.y - y * vec.x); + } + + template + template + U Vec3::Distance(const Vec3& vec) const + { + return 
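+		// When only comparing distances, SquaredDistance() is the cheaper call since
+		// it skips the sqrt: `a.SquaredDistance(b) < radius * radius` is equivalent
+		// to `a.Distance(b) < radius` for non-negative radius.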
static_cast(std::sqrt(static_cast(SquaredDistance(vec)))); + } + + template + constexpr T Vec3::DotProduct(const Vec3& vec) const + { + return x * vec.x + y * vec.y + z * vec.z; + } + + template + Vec3 Vec3::GetAbs() const + { + return Vec3(std::abs(x), std::abs(y), std::abs(z)); + } + + template + template + U Vec3::GetLength() const + { + return static_cast(std::sqrt(static_cast(GetSquaredLength()))); + } + + template + Vec3 Vec3::GetNormal(T* length) const + { + Vec3 vec(*this); + vec.Normalize(length); + + return vec; + } + + template + constexpr T Vec3::GetSquaredLength() const + { + return x*x + y*y + z*z; + } + + template + constexpr Vec3& Vec3::Maximize(const Vec3& vec) + { + if (vec.x > x) + x = vec.x; + + if (vec.y > y) + y = vec.y; + + if (vec.z > z) + z = vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::Minimize(const Vec3& vec) + { + if (vec.x < x) + x = vec.x; + + if (vec.y < y) + y = vec.y; + + if (vec.z < z) + z = vec.z; + + return *this; + } + + template + Vec3& Vec3::Normalize(T* length) + { + T norm = GetLength(); + if (norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + x *= invNorm; + y *= invNorm; + z *= invNorm; + } + + if (length) + *length = norm; + + return *this; + } + + template + constexpr T Vec3::SquaredDistance(const Vec3& vec) const + { + return (*this - vec).GetSquaredLength(); + } + + template + std::string Vec3::ToString() const + { + return "Vec3(" + std::to_string(x) + ", " + std::to_string(y) + ", " + std::to_string(z) + ')'; + } + + template + constexpr T& Vec3::operator[](std::size_t i) + { + Scop::Assert(i < 3, "index out of range"); + return *(&x + i); + } + + template + constexpr const T& Vec3::operator[](std::size_t i) const + { + Scop::Assert(i < 3, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec3& Vec3::operator+() const + { + return *this; + } + + template + constexpr Vec3 Vec3::operator-() const + { + return Vec3(-x, -y, -z); + } + template + constexpr Vec3 Vec3::operator+(const Vec3& vec) const + { + return Vec3(x + vec.x, y + vec.y, z + vec.z); + } + + template + constexpr Vec3 Vec3::operator-(const Vec3& vec) const + { + return Vec3(x - vec.x, y - vec.y, z - vec.z); + } + + template + constexpr Vec3 Vec3::operator*(const Vec3& vec) const + { + return Vec3(x * vec.x, y * vec.y, z * vec.z); + } + + template + constexpr Vec3 Vec3::operator*(T scale) const + { + return Vec3(x * scale, y * scale, z * scale); + } + + template + constexpr Vec3 Vec3::operator/(const Vec3& vec) const + { + return Vec3(x / vec.x, y / vec.y, z / vec.z); + } + + template + constexpr Vec3 Vec3::operator/(T scale) const + { + return Vec3(x / scale, y / scale, z / scale); + } + + template + constexpr Vec3 Vec3::operator%(const Vec3& vec) const + { + return Vec3(Mod(x, vec.x), Mod(y, vec.y), Mod(z, vec.z)); + } + + template + constexpr Vec3 Vec3::operator%(T mod) const + { + return Vec3(Mod(x, mod), Mod(y, mod), Mod(z, mod)); + } + + template + constexpr Vec3& Vec3::operator+=(const Vec3& vec) + { + x += vec.x; + y += vec.y; + z += vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator-=(const Vec3& vec) + { + x -= vec.x; + y -= vec.y; + z -= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator*=(const Vec3& vec) + { + x *= vec.x; + y *= vec.y; + z *= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator*=(T scale) + { + x *= scale; + y *= scale; + z *= scale; + + return *this; + } + + template + constexpr Vec3& Vec3::operator/=(const Vec3& vec) + { + x /= 
vec.x; + y /= vec.y; + z /= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator/=(T scale) + { + x /= scale; + y /= scale; + z /= scale; + + return *this; + } + + template + constexpr Vec3& Vec3::operator%=(const Vec3& vec) + { + x = Mod(x, vec.x); + y = Mod(y, vec.y); + z = Mod(z, vec.z); + + return *this; + } + + template + constexpr Vec3& Vec3::operator%=(T mod) + { + x = Mod(x, mod); + y = Mod(y, mod); + z = Mod(z, mod); + + return *this; + } + + template + constexpr bool Vec3::operator==(const Vec3& vec) const + { + return x == vec.x && y == vec.y && z == vec.z; + } + + template + constexpr bool Vec3::operator!=(const Vec3& vec) const + { + return !operator==(vec); + } + + template + constexpr bool Vec3::operator<(const Vec3& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + return z < vec.z; + } + + template + constexpr bool Vec3::operator<=(const Vec3& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + return z <= vec.z; + } + + template + constexpr bool Vec3::operator>(const Vec3& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + return z > vec.z; + } + + template + constexpr bool Vec3::operator>=(const Vec3& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + return z >= vec.z; + } + + template + constexpr Vec3 Vec3::Apply(T(*func)(T), const Vec3& vec) + { + return Vec3(func(vec.x), func(vec.y), func(vec.z)); + } + + template + constexpr bool Vec3::ApproxEqual(const Vec3& lhs, const Vec3& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Vec3 Vec3::CrossProduct(const Vec3& vec1, const Vec3& vec2) + { + return vec1.CrossProduct(vec2); + } + + template + constexpr T Vec3::DotProduct(const Vec3& vec1, const Vec3& vec2) + { + return vec1.DotProduct(vec2); + } + + template + constexpr Vec3 Vec3::Backward() + { + return Vec3(0, 0, 1); + } + + template + template + U Vec3::Distance(const Vec3& vec1, const Vec3& vec2) + { + return vec1.Distance(vec2); + } + + template + constexpr Vec3 Vec3::Down() + { + return Vec3(0, -1, 0); + } + + template + constexpr Vec3 Vec3::Forward() + { + return Vec3(0, 0, -1); + } + + template + constexpr Vec3 Vec3::Left() + { + return Vec3(-1, 0, 0); + } + + template + constexpr Vec3 Vec3::Max(const Vec3& lhs, const Vec3& rhs) + { + Vec3 max = lhs; + max.Maximize(rhs); + + return max; + } + + template + constexpr Vec3 Vec3::Min(const Vec3& lhs, const Vec3& rhs) + { + Vec3 min = lhs; + min.Minimize(rhs); + + return min; + } + + template + Vec3 Vec3::Normalize(const Vec3& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec3 Vec3::Right() + { + return Vec3(1, 0, 0); + } + + template + constexpr T Vec3::SquaredDistance(const Vec3& vec1, const Vec3& vec2) + { + return vec1.SquaredDistance(vec2); + } + + template + constexpr Vec3 Vec3::Unit() + { + return Vec3(1); + } + + template + constexpr Vec3 Vec3::UnitX() + { + return Vec3(1, 0, 0); + } + + template + constexpr Vec3 Vec3::UnitY() + { + return Vec3(0, 1, 0); + } + + template + constexpr Vec3 Vec3::UnitZ() + { + return Vec3(0, 0, 1); + } + + template + constexpr Vec3 Vec3::Up() + { + return Vec3(0, 1, 0); + } + + template + constexpr Vec3 Vec3::Zero() + { + return Vec3(0, 0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec3& vec) + { + return out << "Vec3(" << vec.x << ", " << vec.y << ", " << vec.z << ')'; + } + + 
template + constexpr Vec3 operator*(T scale, const Vec3& vec) + { + return Vec3(scale * vec.x, scale * vec.y, scale * vec.z); + } + + template + constexpr Vec3 operator/(T scale, const Vec3& vec) + { + return Vec3(scale / vec.x, scale / vec.y, scale / vec.z); + } + + template + constexpr Vec3 operator%(T mod, const Vec3& vec) + { + return Vec3(Mod(mod, vec.x), Mod(mod, vec.y), Mod(mod, vec.z)); + } +} + diff --git a/runtime/Includes/Maths/Vec4.h b/runtime/Includes/Maths/Vec4.h new file mode 100755 index 0000000..e8bff3a --- /dev/null +++ b/runtime/Includes/Maths/Vec4.h @@ -0,0 +1,115 @@ +#ifndef __SCOP_VEC4__ +#define __SCOP_VEC4__ + +#include +#include +#include +#include + +#include + +namespace Scop +{ + template class Vec2; + template class Vec3; + + template + struct Vec4 + { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + + constexpr Vec4() = default; + constexpr Vec4(T X, T Y, T Z, T W = 1.0); + constexpr Vec4(T X, T Y, const Vec2& vec); + constexpr Vec4(T X, const Vec2& vec, T W); + constexpr Vec4(T X, const Vec3& vec); + constexpr explicit Vec4(T scale); + constexpr Vec4(const Vec2& vec, T Z = 0.0, T W = 1.0); + constexpr Vec4(const Vec3& vec, T W = 1.0); + template constexpr explicit Vec4(const Vec4& vec); + constexpr Vec4(const Vec4&) = default; + constexpr Vec4(Vec4&&) = default; + + T AbsDotProduct(const Vec4& vec) const; + constexpr bool ApproxEqual(const Vec4& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr T DotProduct(const Vec4& vec) const; + + Vec4 GetNormal(T* length = nullptr) const; + + constexpr Vec4& Maximize(const Vec4& vec); + constexpr Vec4& Minimize(const Vec4& vec); + + Vec4& Normalize(T* length = nullptr); + + std::string ToString() const; + + constexpr Vec4& operator=(const Vec4&) = default; + constexpr Vec4& operator=(Vec4&&) = default; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr const Vec4& operator+() const; + constexpr Vec4 operator-() const; + + constexpr Vec4 operator+(const Vec4& vec) const; + constexpr Vec4 operator-(const Vec4& vec) const; + constexpr Vec4 operator*(const Vec4& vec) const; + constexpr Vec4 operator*(T scale) const; + constexpr Vec4 operator/(const Vec4& vec) const; + constexpr Vec4 operator/(T scale) const; + constexpr Vec4 operator%(const Vec4& vec) const; + constexpr Vec4 operator%(T mod) const; + + constexpr Vec4& operator+=(const Vec4& vec); + constexpr Vec4& operator-=(const Vec4& vec); + constexpr Vec4& operator*=(const Vec4& vec); + constexpr Vec4& operator*=(T scale); + constexpr Vec4& operator/=(const Vec4& vec); + constexpr Vec4& operator/=(T scale); + constexpr Vec4& operator%=(const Vec4& vec); + constexpr Vec4& operator%=(T mod); + + constexpr bool operator==(const Vec4& vec) const; + constexpr bool operator!=(const Vec4& vec) const; + constexpr bool operator<(const Vec4& vec) const; + constexpr bool operator<=(const Vec4& vec) const; + constexpr bool operator>(const Vec4& vec) const; + constexpr bool operator>=(const Vec4& vec) const; + + static constexpr Vec4 Apply(T(*func)(T), const Vec4& vec); + static constexpr bool ApproxEqual(const Vec4& lhs, const Vec4& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr T DotProduct(const Vec4& vec1, const Vec4& vec2); + static Vec4 Normalize(const Vec4& vec); + static constexpr Vec4 UnitX(); + static constexpr Vec4 UnitY(); + static constexpr Vec4 UnitZ(); + static constexpr Vec4 Zero(); + + ~Vec4() = default; 
+ }; + + using Vec4d = Vec4; + using Vec4f = Vec4; + using Vec4i = Vec4; + using Vec4ui = Vec4; + using Vec4i32 = Vec4; + using Vec4i64 = Vec4; + using Vec4ui32 = Vec4; + using Vec4ui64 = Vec4; + + template std::ostream& operator<<(std::ostream& out, const Vec4& vec); + + template constexpr Vec4 operator*(T scale, const Vec4& vec); + template constexpr Vec4 operator/(T scale, const Vec4& vec); + template constexpr Vec4 operator%(T mod, const Vec4& vec); +} + +#include + +#endif // __AK_VEC4__ + diff --git a/runtime/Includes/Maths/Vec4.inl b/runtime/Includes/Maths/Vec4.inl new file mode 100755 index 0000000..7f016d7 --- /dev/null +++ b/runtime/Includes/Maths/Vec4.inl @@ -0,0 +1,424 @@ +#pragma once + +#include + +namespace Scop +{ + template + constexpr Vec4::Vec4(T X, T Y, T Z, T W) : x(X), y(Y), z(Z), w(W) {} + + template + constexpr Vec4::Vec4(T X, T Y, const Vec2& vec) : x(X), y(Y), z(vec.x), w(vec.y) {} + + template + constexpr Vec4::Vec4(T X, const Vec2& vec, T W) : x(X), y(vec.x), z(vec.y), w(W) {} + + template + constexpr Vec4::Vec4(T X, const Vec3& vec) : x(X), y(vec.x), z(vec.y), w(vec.z) {} + + template + constexpr Vec4::Vec4(T scale) : x(scale), y(scale), z(scale), w(scale) {} + + template + constexpr Vec4::Vec4(const Vec2& vec, T Z, T W) : x(vec.x), y(vec.y), z(Z), w(W) {} + + template + constexpr Vec4::Vec4(const Vec3& vec, T W) : x(vec.x), y(vec.y), z(vec.z), w(W) {} + + template + template + constexpr Vec4::Vec4(const Vec4& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)), z(static_cast(vec.z)), w(static_cast(vec.w)) {} + + template + T Vec4::AbsDotProduct(const Vec4& vec) const + { + return std::abs(x * vec.x) + std::abs(y * vec.y) + std::abs(z * vec.z) + std::abs(w * vec.w); + } + + template + constexpr bool Vec4::ApproxEqual(const Vec4& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference) && NumberEquals(z, vec.z, maxDifference) && NumberEquals(w, vec.w, maxDifference); + } + + template + constexpr T Vec4::DotProduct(const Vec4& vec) const + { + return x*vec.x + y*vec.y + z*vec.z + w*vec.w; + } + + template + Vec4 Vec4::GetNormal(T* length) const + { + Vec4 vec(*this); + vec.Normalize(length); + + return vec; + } + + template + constexpr Vec4& Vec4::Maximize(const Vec4& vec) + { + if (vec.x > x) + x = vec.x; + + if (vec.y > y) + y = vec.y; + + if (vec.z > z) + z = vec.z; + + if (vec.w > w) + w = vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::Minimize(const Vec4& vec) + { + if (vec.x < x) + x = vec.x; + + if (vec.y < y) + y = vec.y; + + if (vec.z < z) + z = vec.z; + + if (vec.w < w) + w = vec.w; + + return *this; + } + + template + Vec4& Vec4::Normalize(T* length) + { + T invLength = T(1.0) / w; + x *= invLength; + y *= invLength; + z *= invLength; + + if (length) + *length = w; + + w = T(1.0); + + return *this; + } + + template + std::string Vec4::ToString() const + { + std::ostringstream ss; + ss << *this; + + return ss.str(); + } + + template + constexpr T& Vec4::operator[](std::size_t i) + { + Scop::Assert(i < 4, "index out of range"); + return *(&x + i); + } + + template + constexpr const T& Vec4::operator[](std::size_t i) const + { + Scop::Assert(i < 4, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec4& Vec4::operator+() const + { + return *this; + } + + template + constexpr Vec4 Vec4::operator-() const + { + return Vec4(-x, -y, -z, -w); + } + template + constexpr Vec4 Vec4::operator+(const Vec4& vec) const + { + return Vec4(x + vec.x, y + vec.y, 
z + vec.z, w + vec.w);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator-(const Vec4& vec) const
+	{
+		return Vec4(x - vec.x, y - vec.y, z - vec.z, w - vec.w);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator*(const Vec4& vec) const
+	{
+		return Vec4(x * vec.x, y * vec.y, z * vec.z, w * vec.w);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator*(T scale) const
+	{
+		return Vec4(x * scale, y * scale, z * scale, w * scale);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator/(const Vec4& vec) const
+	{
+		return Vec4(x / vec.x, y / vec.y, z / vec.z, w / vec.w);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator/(T scale) const
+	{
+		return Vec4(x / scale, y / scale, z / scale, w / scale);
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator%(const Vec4& vec) const
+	{
+		return Vec4(Mod(x, vec.x), Mod(y, vec.y), Mod(z, vec.z), Mod(w, vec.w));
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::operator%(T mod) const
+	{
+		return Vec4(Mod(x, mod), Mod(y, mod), Mod(z, mod), Mod(w, mod));
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator+=(const Vec4& vec)
+	{
+		x += vec.x;
+		y += vec.y;
+		z += vec.z;
+		w += vec.w;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator-=(const Vec4& vec)
+	{
+		x -= vec.x;
+		y -= vec.y;
+		z -= vec.z;
+		w -= vec.w;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator*=(const Vec4& vec)
+	{
+		x *= vec.x;
+		y *= vec.y;
+		z *= vec.z;
+		w *= vec.w;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator*=(T scale)
+	{
+		x *= scale;
+		y *= scale;
+		z *= scale;
+		w *= scale;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator/=(const Vec4& vec)
+	{
+		x /= vec.x;
+		y /= vec.y;
+		z /= vec.z;
+		w /= vec.w;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator/=(T scale)
+	{
+		x /= scale;
+		y /= scale;
+		z /= scale;
+		w /= scale;
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator%=(const Vec4& vec)
+	{
+		x = Mod(x, vec.x);
+		y = Mod(y, vec.y);
+		z = Mod(z, vec.z);
+		w = Mod(w, vec.w);
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr Vec4<T>& Vec4<T>::operator%=(T mod)
+	{
+		x = Mod(x, mod);
+		y = Mod(y, mod);
+		z = Mod(z, mod);
+		w = Mod(w, mod);
+
+		return *this;
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator==(const Vec4& vec) const
+	{
+		return x == vec.x && y == vec.y && z == vec.z && w == vec.w;
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator!=(const Vec4& vec) const
+	{
+		return !operator==(vec);
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator<(const Vec4& vec) const
+	{
+		if (x != vec.x)
+			return x < vec.x;
+
+		if (y != vec.y)
+			return y < vec.y;
+
+		if (z != vec.z)
+			return z < vec.z;
+
+		return w < vec.w;
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator<=(const Vec4& vec) const
+	{
+		if (x != vec.x)
+			return x < vec.x;
+
+		if (y != vec.y)
+			return y < vec.y;
+
+		if (z != vec.z)
+			return z < vec.z;
+
+		return w <= vec.w;
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator>(const Vec4& vec) const
+	{
+		if (x != vec.x)
+			return x > vec.x;
+
+		if (y != vec.y)
+			return y > vec.y;
+
+		if (z != vec.z)
+			return z > vec.z;
+
+		return w > vec.w;
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::operator>=(const Vec4& vec) const
+	{
+		if (x != vec.x)
+			return x > vec.x;
+
+		if (y != vec.y)
+			return y > vec.y;
+
+		if (z != vec.z)
+			return z > vec.z;
+
+		return w >= vec.w;
+	}
+
+	template<typename T>
+	constexpr Vec4<T> Vec4<T>::Apply(T(*func)(T), const Vec4& vec)
+	{
+		return Vec4(func(vec.x), func(vec.y), func(vec.z), func(vec.w));
+	}
+
+	template<typename T>
+	constexpr bool Vec4<T>::ApproxEqual(const Vec4& lhs, const Vec4& rhs, T maxDifference)
+	{
+		return
lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr T Vec4::DotProduct(const Vec4& vec1, const Vec4& vec2) + { + return vec1.DotProduct(vec2); + } + + template + Vec4 Vec4::Normalize(const Vec4& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec4 Vec4::UnitX() + { + return Vec4(1, 0, 0, 1); + } + + template + constexpr Vec4 Vec4::UnitY() + { + return Vec4(0, 1, 0, 1); + } + + template + constexpr Vec4 Vec4::UnitZ() + { + return Vec4(0, 0, 1, 1); + } + + template + constexpr Vec4 Vec4::Zero() + { + return Vec4(0, 0, 0, 1); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec4& vec) + { + return out << "Vec4(" << vec.x << ", " << vec.y << ", " << vec.z << ", " << vec.w << ')'; + } + + template + constexpr Vec4 operator*(T scale, const Vec4& vec) + { + return Vec4(scale * vec.x, scale * vec.y, scale * vec.z, scale * vec.w); + } + + template + constexpr Vec4 operator/(T scale, const Vec4& vec) + { + return Vec4(scale / vec.x, scale / vec.y, scale / vec.z, scale / vec.w); + } + + template + constexpr Vec4 operator%(T mod, const Vec4& vec) + { + return Vec4(Mod(mod, vec.x), Mod(mod, vec.y), Mod(mod, vec.z), Mod(mod, vec.w)); + } +} + diff --git a/runtime/Includes/Platform/Inputs.h b/runtime/Includes/Platform/Inputs.h index ca3d485..6e1929a 100644 --- a/runtime/Includes/Platform/Inputs.h +++ b/runtime/Includes/Platform/Inputs.h @@ -1,18 +1,7 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Inputs.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/05 16:27:35 by maldavid #+# #+# */ -/* Updated: 2024/07/05 20:35:09 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_INPUTS__ #define __MLX_INPUTS__ +#include #include #include diff --git a/runtime/Includes/Platform/Window.h b/runtime/Includes/Platform/Window.h index 30c3945..04ef60e 100644 --- a/runtime/Includes/Platform/Window.h +++ b/runtime/Includes/Platform/Window.h @@ -1,36 +1,30 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Window.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 21:53:12 by maldavid #+# #+# */ -/* Updated: 2024/05/25 16:11:00 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_WINDOW__ #define __MLX_WINDOW__ +#include + namespace mlx { class Window { public: - Window(std::size_t w, std::size_t h, const std::string& title); + Window(std::size_t w, std::size_t h, const std::string& title, bool hidden = false); - inline void* GetWindowHandle() const noexcept { return p_window; } + inline Handle GetWindowHandle() const noexcept { return p_window; } inline int GetWidth() const noexcept { return m_width; } inline int GetHeight() const noexcept { return m_height; } inline std::uint32_t GetID() const noexcept { return m_id; } + inline VkSurfaceKHR CreateVulkanSurface(VkInstance instance) const noexcept { return SDLManager::Get().CreateVulkanSurface(p_window, instance); } + inline std::vector GetRequiredVulkanInstanceExtentions() const noexcept { return SDLManager::Get().GetRequiredVulkanInstanceExtentions(p_window); } + inline Vec2ui GetVulkanDrawableSize() const noexcept { return SDLManager::Get().GetVulkanDrawableSize(p_window); } + void Destroy() noexcept; 
~Window() = default; private: - void* p_window = nullptr; + Handle p_window = nullptr; std::uint32_t m_id = -1; int m_width = 0; int m_height = 0; diff --git a/runtime/Includes/PreCompiled.h b/runtime/Includes/PreCompiled.h index 6601fe9..0284a51 100644 --- a/runtime/Includes/PreCompiled.h +++ b/runtime/Includes/PreCompiled.h @@ -1,31 +1,20 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* PreCompiled.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/25 17:37:23 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:25:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_PRE_COMPILED_HEADER__ #define __MLX_PRE_COMPILED_HEADER__ #define VK_NO_PROTOTYPES -#define Window X11Window // fuck X11 +#define Window X11Window // f*ck X11 #include #include #include #include -#include #include #include +#include + #include #include #include @@ -40,8 +29,6 @@ #include #include #include -#include -#include #include #include #include @@ -82,5 +69,8 @@ #include #include #include +#include + +using Handle = void*; #endif diff --git a/runtime/Includes/Renderer/Buffer.h b/runtime/Includes/Renderer/Buffer.h new file mode 100644 index 0000000..24d7f31 --- /dev/null +++ b/runtime/Includes/Renderer/Buffer.h @@ -0,0 +1,83 @@ +#ifndef __MLX_GPU_BUFFER__ +#define __MLX_GPU_BUFFER__ + +#include +#include +#include + +namespace mlx +{ + class GPUBuffer + { + public: + GPUBuffer() = default; + + void Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data); + void Destroy() noexcept; + + bool CopyFrom(const GPUBuffer& buffer) noexcept; + + void Swap(GPUBuffer& buffer) noexcept; + + [[nodiscard]] MLX_FORCEINLINE void* GetMap() const noexcept { return m_memory.map; } + [[nodiscard]] MLX_FORCEINLINE VkBuffer Get() const noexcept { return m_buffer; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceMemory GetMemory() const noexcept { return m_memory.memory; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceSize GetSize() const noexcept { return m_memory.size; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceSize GetOffset() const noexcept { return 0; } + + [[nodiscard]] inline bool IsInit() const noexcept { return m_buffer != VK_NULL_HANDLE; } + + ~GPUBuffer() = default; + + protected: + void PushToGPU() noexcept; + + protected: + VkBuffer m_buffer = VK_NULL_HANDLE; + VmaAllocation m_allocation; + VkDeviceSize m_offset = 0; + VkDeviceSize m_size = 0; + void* p_map = nullptr; + + private: + void CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VmaAllocationCreateInfo alloc_info); + + private: + VkBufferUsageFlags m_usage = 0; + }; + + class VertexBuffer : public GPUBuffer + { + public: + inline void Init(std::uint32_t size, VkBufferUsageFlags additional_flags = 0) { GPUBuffer::Init(BufferType::LowDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | additional_flags, {}); } + void SetData(CPUBuffer data); + inline void Bind(VkCommandBuffer cmd) const noexcept { VkDeviceSize offset = 0; vkCmdBindVertexBuffers(cmd, 0, 1, &m_buffer, &offset); } + }; + + class IndexBuffer : public GPUBuffer + { + public: + inline void Init(std::uint32_t size, VkBufferUsageFlags additional_flags = 0) { GPUBuffer::Init(BufferType::LowDynamic, size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | additional_flags, {}); } + void SetData(CPUBuffer data); + inline void Bind(VkCommandBuffer cmd) const noexcept { vkCmdBindIndexBuffer(cmd, m_buffer, 0, 
VK_INDEX_TYPE_UINT32); } + }; + + class UniformBuffer + { + public: + void Init(std::uint32_t size); + void SetData(CPUBuffer data, std::size_t frame_index); + void Destroy() noexcept; + + inline VkDeviceSize GetSize(int i) const noexcept { return m_buffers[i].GetSize(); } + inline VkDeviceSize GetOffset(int i) const noexcept { return m_buffers[i].GetOffset(); } + inline VkBuffer GetVk(int i) const noexcept { return m_buffers[i].Get(); } + inline GPUBuffer& Get(int i) noexcept { return m_buffers[i]; } + + private: + std::array m_buffers; + std::array m_maps; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Buffers/Buffer.h b/runtime/Includes/Renderer/Buffers/Buffer.h deleted file mode 100644 index f8532ab..0000000 --- a/runtime/Includes/Renderer/Buffers/Buffer.h +++ /dev/null @@ -1,66 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Buffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 23:18:52 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:20:49 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_BUFFER__ -#define __MLX_VK_BUFFER__ - -#include -#include -#include - -namespace mlx -{ - class Buffer : public CommandResource - { - public: - Buffer() = default; - - void Create(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, const char* name, const void* data = nullptr); - void Destroy() noexcept; - - inline void MapMem(void** data) noexcept { RenderCore::Get().GetAllocator().MapMemory(m_allocation, data); m_is_mapped = true; } - inline bool IsMapped() const noexcept { return m_is_mapped; } - inline void UnmapMem() noexcept { RenderCore::Get().GetAllocator().UnmapMemory(m_allocation); m_is_mapped = false; } - - void Flush(VkDeviceSize size = VK_WHOLE_SIZE, VkDeviceSize offset = 0); - bool CopyFromBuffer(const Buffer& buffer) noexcept; - - inline VkBuffer& operator()() noexcept { return m_buffer; } - inline VkBuffer& Get() noexcept { return m_buffer; } - inline VkDeviceSize GetSize() const noexcept { return m_size; } - inline VkDeviceSize GetOffset() const noexcept { return m_offset; } - - ~Buffer() = default; - - protected: - void PushToGPU() noexcept; - void Swap(Buffer& buffer) noexcept; - - protected: - VmaAllocation m_allocation; - VkBuffer m_buffer = VK_NULL_HANDLE; - VkDeviceSize m_offset = 0; - VkDeviceSize m_size = 0; - - private: - void CreateBuffer(VkBufferUsageFlags usage, VmaAllocationCreateInfo info, VkDeviceSize size, const char* name); - - private: - #ifdef DEBUG - std::string m_name; - #endif - VkBufferUsageFlags m_usage = 0; - bool m_is_mapped = false; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Buffers/IndexBuffer.h b/runtime/Includes/Renderer/Buffers/IndexBuffer.h deleted file mode 100644 index 877d518..0000000 --- a/runtime/Includes/Renderer/Buffers/IndexBuffer.h +++ /dev/null @@ -1,29 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* IndexBuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/25 15:05:05 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:24:46 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_IBO__ -#define __VK_IBO__ - -#include -#include - -namespace mlx -{ - class 
ConstantIndexBuffer : public Buffer - { - public: - inline void Create(std::uint32_t size, const std::uint16_t* data, const char* name) { Buffer::Create(BufferType::Constant, size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, name, data); } - inline void Bind(Renderer& renderer) noexcept { renderer.GetActiveCmdBuffer().BindIndexBuffer(*this); } - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Buffers/UniformBuffer.h b/runtime/Includes/Renderer/Buffers/UniformBuffer.h deleted file mode 100644 index d7f6584..0000000 --- a/runtime/Includes/Renderer/Buffers/UniformBuffer.h +++ /dev/null @@ -1,50 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UniformBuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:45:29 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:39:32 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_UBO__ -#define __MLX_VK_UBO__ - -#include - -namespace mlx -{ - class UniformBuffer - { - public: - UniformBuffer() = default; - - void Create(NonOwningPtr renderer, std::uint32_t size, const char* name); - void Destroy() noexcept; - - void SetData(std::uint32_t size, const void* data); - - VkDeviceSize GetSize() noexcept; - VkDeviceSize GetOffset() noexcept; - VkDeviceMemory GetDeviceMemory() noexcept; - VkBuffer& operator()() noexcept; - VkBuffer& Get() noexcept; - - inline VkDeviceSize GetSize(int i) noexcept { return m_buffers[i].GetSize(); } - inline VkDeviceSize GetOffset(int i) noexcept { return m_buffers[i].GetOffset(); } - inline VkBuffer& operator()(int i) noexcept { return m_buffers[i].Get(); } - inline VkBuffer& Get(int i) noexcept { return m_buffers[i].Get(); } - - ~UniformBuffer() = default; - - private: - std::array m_buffers; - std::array m_maps; - NonOwningPtr p_renderer; - }; -} - -#endif // __MLX_VK_UBO__ diff --git a/runtime/Includes/Renderer/Buffers/VertexBuffer.h b/runtime/Includes/Renderer/Buffers/VertexBuffer.h deleted file mode 100644 index 8feb7c0..0000000 --- a/runtime/Includes/Renderer/Buffers/VertexBuffer.h +++ /dev/null @@ -1,46 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* VertexBuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:27:38 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:24:41 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_VBO__ -#define __MLX_VK_VBO__ - -#include -#include -#include - -namespace mlx -{ - class RAMVertexBuffer : public Buffer - { - public: - inline void Create(std::uint32_t size, const void* data, const char* name) { Buffer::Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - void SetData(std::uint32_t size, const void* data); - inline void Bind(Renderer& renderer) noexcept { renderer.GetActiveCmdBuffer().BindVertexBuffer(*this); } - }; - - class DeviceVertexBuffer : public Buffer - { - public: - inline void create(std::uint32_t size, const void* data, const char* name) { Buffer::Create(BufferType::LowDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - void SetData(std::uint32_t size, const void* data); - inline void Bind(Renderer& renderer) noexcept { renderer.GetActiveCmdBuffer().BindVertexBuffer(*this); } - 
}; - - class ConstantVertexBuffer : public Buffer - { - public: - inline void Create(std::uint32_t size, const void* data, const char* name) { Buffer::Create(BufferType::Constant, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - inline void Bind(Renderer& renderer) noexcept { renderer.GetActiveCmdBuffer().BindVertexBuffer(*this); } - }; -} - -#endif // __MLX_VK_VBO__ diff --git a/runtime/Includes/Renderer/Command/CommandBuffer.h b/runtime/Includes/Renderer/Command/CommandBuffer.h deleted file mode 100644 index 90a0a48..0000000 --- a/runtime/Includes/Renderer/Command/CommandBuffer.h +++ /dev/null @@ -1,70 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandBuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:25:42 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:37:54 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_CMD_BUFFER__ -#define __MLX_VK_CMD_BUFFER__ - -#include -#include - -namespace mlx -{ - class Buffer; - class Image; - - class CommandBuffer - { - public: - void Init(CommandBufferType type, NonOwningPtr manager); - void Init(CommandBufferType type, NonOwningPtr pool); - void Destroy() noexcept; - - void BeginRecord(VkCommandBufferUsageFlags usage = 0); - void Submit(NonOwningPtr signal, NonOwningPtr wait) noexcept; - void SubmitIdle(bool shouldWaitForExecution = true) noexcept; // TODO : handle `shouldWaitForExecution` as false by default (needs to modify CmdResources lifetimes to do so) - void UpdateSubmitState() noexcept; - inline void WaitForExecution() noexcept { m_fence.Wait(); UpdateSubmitState(); m_state = CommandBufferState::Ready; } - inline void Reset() noexcept { vkResetCommandBuffer(m_cmd_buffer, 0); } - void EndRecord(); - - void BindVertexBuffer(Buffer& buffer) noexcept; - void BindIndexBuffer(Buffer& buffer) noexcept; - void CopyBuffer(Buffer& dst, Buffer& src) noexcept; - void CopyBufferToImage(Buffer& buffer, Image& image) noexcept; - void CopyImagetoBuffer(Image& image, Buffer& buffer) noexcept; - void TransitionImageLayout(Image& image, VkImageLayout new_layout) noexcept; - - inline bool IsInit() const noexcept { return m_state != CommandBufferState::Uninit; } - inline bool IsReadyToBeUsed() const noexcept { return m_state == CommandBufferState::Ready; } - inline bool IsRecording() const noexcept { return m_state == CommandBufferState::Recording; } - inline bool HasBeenSubmitted() const noexcept { return m_state == CommandBufferState::Submitted; } - inline CommandBufferState GetCurrentState() const noexcept { return m_state; } - - inline VkCommandBuffer& operator()() noexcept { return m_cmd_buffer; } - inline VkCommandBuffer& Get() noexcept { return m_cmd_buffer; } - inline Fence& GetFence() noexcept { return m_fence; } - - private: - void PreTransferBarrier() noexcept; - void PostTransferBarrier() noexcept; - - private: - std::vector> m_cmd_resources; - Fence m_fence; - VkCommandBuffer m_cmd_buffer = VK_NULL_HANDLE; - NonOwningPtr m_pool; - CommandBufferState m_state = CommandBufferState::Uninit; - CommandBufferType m_type; - }; -} - -#endif // __MLX_VK_CMD_BUFFER__ diff --git a/runtime/Includes/Renderer/Command/CommandManager.h b/runtime/Includes/Renderer/Command/CommandManager.h deleted file mode 100644 index c0dcedc..0000000 --- a/runtime/Includes/Renderer/Command/CommandManager.h +++ /dev/null @@ -1,43 +0,0 @@ 
-/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandManager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:48:52 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:20:53 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_COMMAND_MANAGER__ -#define __MLX_COMMAND_MANAGER__ - -#include -#include -#include - -namespace mlx -{ - class CommandManager - { - public: - CommandManager() = default; - - void Init() noexcept; - void BeginRecord(int active_image_index); - void EndRecord(int active_image_index); - void Destroy() noexcept; - - inline CommandPool& GetCmdPool() noexcept { return m_cmd_pool; } - inline CommandBuffer& GetCmdBuffer(int i) noexcept { return m_cmd_buffers[i]; } - - ~CommandManager() = default; - - private: - std::array m_cmd_buffers; - CommandPool m_cmd_pool; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Command/CommandPool.h b/runtime/Includes/Renderer/Command/CommandPool.h deleted file mode 100644 index edb8f22..0000000 --- a/runtime/Includes/Renderer/Command/CommandPool.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandPool.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:24:12 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:33:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_CMD_POOL__ -#define __MLX_VK_CMD_POOL__ - -namespace mlx -{ - class CommandPool - { - public: - CommandPool() = default; - - void Init(); - void Destroy() noexcept; - - inline VkCommandPool& operator()() noexcept { return m_cmd_pool; } - inline VkCommandPool& Get() noexcept { return m_cmd_pool; } - - ~CommandPool() = default; - - private: - VkCommandPool m_cmd_pool = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_CMD_POOL__ diff --git a/runtime/Includes/Renderer/Command/CommandResource.h b/runtime/Includes/Renderer/Command/CommandResource.h deleted file mode 100644 index e9c367e..0000000 --- a/runtime/Includes/Renderer/Command/CommandResource.h +++ /dev/null @@ -1,38 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandResource.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/16 20:44:29 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:37:06 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_COMMAND_RESOURCE__ -#define __MLX_COMMAND_RESOURCE__ - -#include -#include - -namespace mlx -{ - class CommandResource - { - friend class SingleTimeCmdManager; - - public: - CommandResource() : m_uuid() {} - inline void RecordedInCmdBuffer() noexcept { m_state = CommandResourceState::Held; } - inline void RemovedFromCmdBuffer() noexcept { m_state = CommandResourceState::Free; } - inline UUID GetUUID() const noexcept { return m_uuid; } - virtual ~CommandResource() = default; - - private: - UUID m_uuid; - CommandResourceState m_state = CommandResourceState::Free; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Command/SingleTimeCmdManager.h 
b/runtime/Includes/Renderer/Command/SingleTimeCmdManager.h deleted file mode 100644 index 1ac2ec6..0000000 --- a/runtime/Includes/Renderer/Command/SingleTimeCmdManager.h +++ /dev/null @@ -1,49 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* SingleTimeCmdManager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/15 18:25:57 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:38:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_SINGLE_TIME_CMD_MANAGER__ -#define __MLX_SINGLE_TIME_CMD_MANAGER__ - -#include -#include - -namespace mlx -{ - class CommandBuffer; - - class SingleTimeCmdManager - { - friend class RenderCore; - - public: - SingleTimeCmdManager() = default; - - void Init() noexcept; - void Destroy() noexcept; - - void UpdateSingleTimesCmdBuffersSubmitState() noexcept; - void WaitForAllExecutions() noexcept; - - inline CommandPool& GetCmdPool() noexcept { return m_pool; } - CommandBuffer& GetCmdBuffer() noexcept; - - ~SingleTimeCmdManager() = default; - - inline static constexpr const std::uint8_t BASE_POOL_SIZE = 16; - - private: - std::vector m_buffers; - CommandPool m_pool; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Core/Device.h b/runtime/Includes/Renderer/Core/Device.h deleted file mode 100644 index c46aaf6..0000000 --- a/runtime/Includes/Renderer/Core/Device.h +++ /dev/null @@ -1,40 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Device.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:13:42 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:47:21 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_DEVICE__ -#define __MLX_VK_DEVICE__ - -namespace mlx -{ - class Device - { - public: - void Init(); - void Destroy() noexcept; - - inline VkDevice& operator()() noexcept { return m_device; } - inline VkDevice& Get() noexcept { return m_device; } - - inline VkPhysicalDevice& GetPhysicalDevice() noexcept { return m_physical_device; } - - private: - void PickPhysicalDevice(); - bool CheckDeviceExtensionSupport(VkPhysicalDevice device); - int DeviceScore(VkPhysicalDevice device); - - private: - VkPhysicalDevice m_physical_device = VK_NULL_HANDLE; - VkDevice m_device = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_DEVICE__ diff --git a/runtime/Includes/Renderer/Core/DrawableResource.h b/runtime/Includes/Renderer/Core/DrawableResource.h deleted file mode 100644 index a043d3a..0000000 --- a/runtime/Includes/Renderer/Core/DrawableResource.h +++ /dev/null @@ -1,28 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DrawableResource.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/10 21:00:37 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:10:56 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_DRAWABLE_RESOURCE__ -#define __MLX_DRAWABLE_RESOURCE__ - -namespace mlx -{ - class DrawableResource - { - public: - DrawableResource() = default; - virtual void Render(class Renderer& renderer) = 0; - virtual void ResetUpdate() {} - 
virtual ~DrawableResource() = default; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Core/Fence.h b/runtime/Includes/Renderer/Core/Fence.h deleted file mode 100644 index ea46aae..0000000 --- a/runtime/Includes/Renderer/Core/Fence.h +++ /dev/null @@ -1,40 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Fence.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:52:09 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:48:31 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_FENCE__ -#define __MLX_VK_FENCE__ - -namespace mlx -{ - class Fence - { - public: - Fence() = default; - - void Init(); - - inline VkFence& Get() noexcept { return m_fence; } - void Wait() noexcept; - void Reset() noexcept; - bool IsReady() const noexcept; - MLX_FORCEINLINE void WaitAndReset() noexcept { Wait(); Reset(); } - - void Destroy() noexcept; - - ~Fence() = default; - - private: - VkFence m_fence = VK_NULL_HANDLE; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Core/Instance.h b/runtime/Includes/Renderer/Core/Instance.h deleted file mode 100644 index cecb73d..0000000 --- a/runtime/Includes/Renderer/Core/Instance.h +++ /dev/null @@ -1,38 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Instance.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:03:04 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:44:02 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_INSTANCE__ -#define __MLX_VK_INSTANCE__ - -namespace mlx -{ - class Instance - { - public: - void Init(); - void Destroy() noexcept; - - inline std::uint32_t GetInstanceVersion() const noexcept { return m_instance_version; } - - inline VkInstance& operator()() noexcept { return m_instance; } - inline VkInstance& Get() noexcept { return m_instance; } - - private: - std::vector GetRequiredExtensions(); - - private: - VkInstance m_instance = VK_NULL_HANDLE; - std::uint32_t m_instance_version = 0; - }; -} - -#endif // __MLX_VK_INSTANCE__ diff --git a/runtime/Includes/Renderer/Core/Queues.h b/runtime/Includes/Renderer/Core/Queues.h deleted file mode 100644 index 2fdc7ec..0000000 --- a/runtime/Includes/Renderer/Core/Queues.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Queues.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:01:49 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:38:50 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_QUEUES__ -#define __MLX_VK_QUEUES__ - -namespace mlx -{ - class Queues - { - public: - struct QueueFamilyIndices - { - std::optional graphics_family; - std::optional present_family; - - inline bool IsComplete() { return graphics_family.has_value() && present_family.has_value(); } - }; - - public: - QueueFamilyIndices FindQueueFamilies(VkPhysicalDevice device); - - void Init(); - - inline VkQueue& GetGraphic() noexcept { return m_graphics_queue; } - inline VkQueue& GetPresent() noexcept { return m_present_queue; } - inline 
QueueFamilyIndices GetFamilies() noexcept - { - if(m_families.has_value()) - return *m_families; - FatalError("Vulkan : cannot get queue families, not init"); - return {}; // just to avoid warnings - } - - private: - VkQueue m_graphics_queue; - VkQueue m_present_queue; - std::optional m_families; - }; -} - -#endif // __MLX_VK_QUEUES__ diff --git a/runtime/Includes/Renderer/Core/RenderCore.h b/runtime/Includes/Renderer/Core/RenderCore.h deleted file mode 100644 index b0939e7..0000000 --- a/runtime/Includes/Renderer/Core/RenderCore.h +++ /dev/null @@ -1,78 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* RenderCore.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:16:32 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:39:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_RENDER_CORE__ -#define __MLX_RENDER_CORE__ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace mlx -{ - const char* VerbaliseVkResult(VkResult result); - VkPipelineStageFlags AccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags); - - #ifdef DEBUG - constexpr const bool enable_validation_layers = true; - #else - constexpr const bool enable_validation_layers = false; - #endif - - const std::vector validation_layers = { "VK_LAYER_KHRONOS_validation" }; - - constexpr const int MAX_FRAMES_IN_FLIGHT = 3; - constexpr const int MAX_SETS_PER_POOL = 512; - constexpr const int NUMBER_OF_UNIFORM_BUFFERS = 1; // change this if for wathever reason more than one uniform buffer is needed - - class RenderCore : public Singleton - { - friend class Singleton; - - public: - void Init(); - void Destroy(); - - inline bool IsInit() const noexcept { return m_is_init; } - inline Instance& GetInstance() noexcept { return m_instance; } - inline Device& GetDevice() noexcept { return m_device; } - inline Queues& GetQueue() noexcept { return m_queues; } - inline GPUallocator& GetAllocator() noexcept { return m_allocator; } - inline ValidationLayers& GetLayers() noexcept { return m_layers; } - inline CommandBuffer& GetSingleTimeCmdBuffer() noexcept { return m_cmd_manager.GetCmdBuffer(); } - inline SingleTimeCmdManager& GetSingleTimeCmdManager() noexcept { return m_cmd_manager; } - inline DescriptorPool& GetDescriptorPool() { return m_pool_manager.GetAvailablePool(); } - - private: - RenderCore() = default; - ~RenderCore() = default; - - private: - ValidationLayers m_layers; - SingleTimeCmdManager m_cmd_manager; - Queues m_queues; - DescriptorPoolManager m_pool_manager; - Device m_device; - Instance m_instance; - GPUallocator m_allocator; - bool m_is_init = false; - }; -} - -#endif // __MLX_RENDER_CORE__ diff --git a/runtime/Includes/Renderer/Core/Semaphore.h b/runtime/Includes/Renderer/Core/Semaphore.h deleted file mode 100644 index 8e071b8..0000000 --- a/runtime/Includes/Renderer/Core/Semaphore.h +++ /dev/null @@ -1,31 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Semaphore.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:59:38 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:56:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** 
*/ - -#ifndef __MLX_VK_SEMAPHORE__ -#define __MLX_VK_SEMAPHORE__ - -namespace mlx -{ - class Semaphore - { - public: - void Init(); - void Destroy() noexcept; - - inline VkSemaphore& Get() noexcept { return m_semaphore; } - - private: - VkSemaphore m_semaphore = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_SEMAPHORE__ diff --git a/runtime/Includes/Renderer/Core/Surface.h b/runtime/Includes/Renderer/Core/Surface.h deleted file mode 100644 index 01a2785..0000000 --- a/runtime/Includes/Renderer/Core/Surface.h +++ /dev/null @@ -1,34 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Surface.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:57:55 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:58:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_SURFACE__ -#define __MLX_VK_SURFACE__ - -namespace mlx -{ - class Surface - { - public: - void Create(class Renderer& renderer); - void Destroy() noexcept; - - VkSurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector& available_formats); - - inline VkSurfaceKHR& operator()() noexcept { return m_surface; } - inline VkSurfaceKHR& Get() noexcept { return m_surface; } - - private: - VkSurfaceKHR m_surface = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_SURFACE__ diff --git a/runtime/Includes/Renderer/Core/ValidationLayers.h b/runtime/Includes/Renderer/Core/ValidationLayers.h deleted file mode 100644 index 0dc3e45..0000000 --- a/runtime/Includes/Renderer/Core/ValidationLayers.h +++ /dev/null @@ -1,44 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* ValidationLayers.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/19 14:04:25 by maldavid #+# #+# */ -/* Updated: 2024/04/23 19:16:25 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_VALIDATION_LAYERS__ -#define __VK_VALIDATION_LAYERS__ - -namespace mlx -{ - class ValidationLayers - { - public: - ValidationLayers() = default; - - void Init(); - void Destroy(); - - bool CheckValidationLayerSupport(); - void PopulateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT& create_info); - - VkResult SetDebugUtilsObjectNameEXT(VkObjectType object_type, std::uint64_t object_handle, const char* object_name); - - ~ValidationLayers() = default; - - private: - VkResult CreateDebugUtilsMessengerEXT(const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator); - static VKAPI_ATTR VkBool32 VKAPI_CALL DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, VkDebugUtilsMessageTypeFlagsEXT message_type, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData); - void DestroyDebugUtilsMessengerEXT(const VkAllocationCallbacks* pAllocator); - - private: - VkDebugUtilsMessengerEXT m_debug_messenger; - PFN_vkSetDebugUtilsObjectNameEXT f_vkSetDebugUtilsObjectNameEXT = nullptr; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Descriptor.h b/runtime/Includes/Renderer/Descriptor.h new file mode 100644 index 0000000..9a28ee3 --- /dev/null +++ b/runtime/Includes/Renderer/Descriptor.h @@ -0,0 +1,48 @@ +#ifndef __MLX_DESCRIPTOR_SET__ +#define __MLX_DESCRIPTOR_SET__ + +#include +#include +#include +#include + 
+namespace mlx +{ + struct Descriptor + { + NonOwningPtr storage_buffer_ptr; + NonOwningPtr uniform_buffer_ptr; + NonOwningPtr image_ptr; + VkDescriptorType type; + ShaderType shader_type; + std::uint32_t binding; + }; + + class DescriptorSet + { + public: + DescriptorSet() { m_set.fill(VK_NULL_HANDLE); } + DescriptorSet(const ShaderSetLayout& layout, VkDescriptorSetLayout vklayout, ShaderType shader_type); + + void SetImage(std::size_t i, std::uint32_t binding, class Image& image); + void SetStorageBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer); + void SetUniformBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer); + void Update(std::size_t i, VkCommandBuffer cmd = VK_NULL_HANDLE) noexcept; + + [[nodiscard]] inline VkDescriptorSet GetSet(std::size_t i) const noexcept { return m_set[i]; } + [[nodiscard]] inline DescriptorSet Duplicate() const { return DescriptorSet{ m_set_layout, m_descriptors }; } + [[nodiscard]] inline bool IsInit() const noexcept { return m_set[0] != VK_NULL_HANDLE; } + + ~DescriptorSet() = default; + + private: + DescriptorSet(VkDescriptorSetLayout layout, const std::vector& descriptors); + + private: + std::vector m_descriptors; + std::array m_set; + VkDescriptorSetLayout m_set_layout; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Descriptors/DescriptorPool.h b/runtime/Includes/Renderer/Descriptors/DescriptorPool.h deleted file mode 100644 index 1b09905..0000000 --- a/runtime/Includes/Renderer/Descriptors/DescriptorPool.h +++ /dev/null @@ -1,42 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DescriptorPool.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:32:43 by maldavid #+# #+# */ -/* Updated: 2024/04/23 19:36:03 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_POOL__ -#define __VK_DESCRIPTOR_POOL__ - -namespace mlx -{ - class DescriptorPool - { - public: - DescriptorPool() = default; - - void Init(std::vector sizes); - VkDescriptorSet AllocateDescriptorSet(class DescriptorSetLayout& layout); - void FreeDescriptor(VkDescriptorSet set); - void Destroy() noexcept; - - inline VkDescriptorPool& operator()() noexcept { return m_pool; } - inline VkDescriptorPool& Get() noexcept { return m_pool; } - inline std::size_t GetNumberOfSetsAllocated() const noexcept { return m_allocated_sets; } - - inline bool IsInit() const noexcept { return m_pool != VK_NULL_HANDLE; } - - ~DescriptorPool() = default; - - private: - VkDescriptorPool m_pool = VK_NULL_HANDLE; - std::size_t m_allocated_sets = 0; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Descriptors/DescriptorPoolManager.h b/runtime/Includes/Renderer/Descriptors/DescriptorPoolManager.h deleted file mode 100644 index 5c032aa..0000000 --- a/runtime/Includes/Renderer/Descriptors/DescriptorPoolManager.h +++ /dev/null @@ -1,35 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DescriptorPoolManager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/20 06:26:26 by maldavid #+# #+# */ -/* Updated: 2024/04/23 19:40:22 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_DESCRIPTOR_POOL_MANAGER__ -#define 
__MLX_DESCRIPTOR_POOL_MANAGER__ - -#include - -namespace mlx -{ - class DescriptorPoolManager - { - public: - DescriptorPoolManager() = default; - - DescriptorPool& GetAvailablePool(); - void DestroyAllPools(); - - ~DescriptorPoolManager() = default; - - private: - std::list m_pools; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Descriptors/DescriptorSet.h b/runtime/Includes/Renderer/Descriptors/DescriptorSet.h deleted file mode 100644 index f687c8c..0000000 --- a/runtime/Includes/Renderer/Descriptors/DescriptorSet.h +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DescriptorSet.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:39:36 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:42:22 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_SET__ -#define __VK_DESCRIPTOR_SET__ - -#include -#include - -namespace mlx -{ - class DescriptorSet - { - public: - DescriptorSet() = default; - - void Init(NonOwningPtr renderer, NonOwningPtr pool, DescriptorSetLayout layout); - - void WriteDescriptor(int binding, NonOwningPtr ubo) const noexcept; - void WriteDescriptor(int binding, const class Image& image) const noexcept; - - inline bool IsInit() const noexcept { return p_pool && p_renderer; } - - void Bind() noexcept; - - DescriptorSet Duplicate(); - - VkDescriptorSet& operator()() noexcept; - VkDescriptorSet& Get() noexcept; - - inline const DescriptorSetLayout& GetLayout() const noexcept { return m_layout; } - - inline const std::array& GetAllFramesDescriptorSets() const { return m_desc_set; } - - void Destroy() noexcept; - - ~DescriptorSet() = default; - - private: - DescriptorSetLayout m_layout; - std::array m_desc_set; - NonOwningPtr p_pool; - NonOwningPtr p_renderer; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Descriptors/DescriptorSetLayout.h b/runtime/Includes/Renderer/Descriptors/DescriptorSetLayout.h deleted file mode 100644 index ce13a5f..0000000 --- a/runtime/Includes/Renderer/Descriptors/DescriptorSetLayout.h +++ /dev/null @@ -1,38 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* DescriptorSetLayout.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:36:22 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:15:01 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_SET_LAYOUT__ -#define __VK_DESCRIPTOR_SET_LAYOUT__ - -namespace mlx -{ - class DescriptorSetLayout - { - public: - DescriptorSetLayout() = default; - - void Init(std::vector> binds, VkShaderStageFlagBits stage); - void Destroy() noexcept; - - inline VkDescriptorSetLayout operator()() const noexcept { return m_layout; } - inline VkDescriptorSetLayout Get() const noexcept { return m_layout; } - inline const std::vector>& GetBindings() const noexcept { return m_bindings; } - - ~DescriptorSetLayout() = default; - - private: - std::vector> m_bindings; - VkDescriptorSetLayout m_layout = VK_NULL_HANDLE; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Enums.h b/runtime/Includes/Renderer/Enums.h index 30083b4..684f4e0 100644 --- a/runtime/Includes/Renderer/Enums.h +++ b/runtime/Includes/Renderer/Enums.h @@ -1,15 +1,3 @@ -/* 
************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Enums.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 22:02:58 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:39:31 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_RENDERER_ENUMS__ #define __MLX_RENDERER_ENUMS__ @@ -18,6 +6,7 @@ namespace mlx enum class BufferType { Constant = 0, + Staging, HighDynamic, // typically stored in RAM LowDynamic, // typically stored in VRAM @@ -25,35 +14,13 @@ namespace mlx }; constexpr std::size_t BufferTypeCount = static_cast(BufferType::EndEnum); - enum class CommandResourceState + enum class ImageType { - Held = 0, - Free, + Color = 0, EndEnum }; - constexpr std::size_t CommandResourceStateCount = static_cast(CommandResourceState::EndEnum); - - enum class CommandBufferState - { - Uninit = 0, // buffer not initialized or destroyed - Ready, // buffer ready to be used after having been submitted - Idle, // buffer has recorded informations but has not been submitted - Recording, // buffer is currently recording - Submitted, // buffer has been submitted - - EndEnum - }; - constexpr std::size_t CommandBufferStateCount = static_cast(CommandBufferState::EndEnum); - - enum class CommandBufferType - { - SingleTime = 0, - LongTime, - - EndEnum - }; - constexpr std::size_t CommandBufferTypeCount = static_cast(CommandBufferType::EndEnum); + constexpr std::size_t ImageTypeCount = static_cast(ImageType::EndEnum); } #endif diff --git a/runtime/Includes/Renderer/Image.h b/runtime/Includes/Renderer/Image.h new file mode 100644 index 0000000..2c3809e --- /dev/null +++ b/runtime/Includes/Renderer/Image.h @@ -0,0 +1,102 @@ +#ifndef __MLX_IMAGE__ +#define __MLX_IMAGE__ + +#include +#include +#include +#include +#include + +namespace mlx +{ + class Image + { + public: + Image() = default; + + inline void Init(VkImage image, VkFormat format, std::uint32_t width, std::uint32_t height, VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED) noexcept + { + m_image = image; + m_format = format; + m_width = width; + m_height = height; + m_layout = layout; + } + + void Init(ImageType type, std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties, bool is_multisampled = false); + void CreateImageView(VkImageViewType type, VkImageAspectFlags aspectFlags, int layer_count = 1) noexcept; + void CreateSampler() noexcept; + void TransitionLayout(VkImageLayout new_layout, VkCommandBuffer cmd = VK_NULL_HANDLE); + void Clear(VkCommandBuffer cmd, Vec4f color); + + void DestroySampler() noexcept; + void DestroyImageView() noexcept; + virtual void Destroy() noexcept; + + [[nodiscard]] MLX_FORCEINLINE VkImage Get() const noexcept { return m_image; } + [[nodiscard]] MLX_FORCEINLINE VkImage operator()() const noexcept { return m_image; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceMemory GetDeviceMemory() const noexcept { return m_memory.memory; } + [[nodiscard]] MLX_FORCEINLINE VkImageView GetImageView() const noexcept { return m_image_view; } + [[nodiscard]] MLX_FORCEINLINE VkFormat GetFormat() const noexcept { return m_format; } + [[nodiscard]] MLX_FORCEINLINE VkImageTiling GetTiling() const noexcept { return m_tiling; } + [[nodiscard]] MLX_FORCEINLINE VkImageLayout GetLayout() const noexcept { return m_layout; } + [[nodiscard]] MLX_FORCEINLINE VkSampler 
GetSampler() const noexcept { return m_sampler; } + [[nodiscard]] MLX_FORCEINLINE std::uint32_t GetWidth() const noexcept { return m_width; } + [[nodiscard]] MLX_FORCEINLINE std::uint32_t GetHeight() const noexcept { return m_height; } + [[nodiscard]] MLX_FORCEINLINE bool IsInit() const noexcept { return m_image != VK_NULL_HANDLE; } + [[nodiscard]] MLX_FORCEINLINE ImageType GetType() const noexcept { return m_type; } + + virtual ~Image() = default; + + private: + VmaAllocation m_allocation; + VkImage m_image = VK_NULL_HANDLE; + VkImageView m_image_view = VK_NULL_HANDLE; + VkSampler m_sampler = VK_NULL_HANDLE; + VkFormat m_format; + VkImageTiling m_tiling; + VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + ImageType m_type; + std::uint32_t m_width = 0; + std::uint32_t m_height = 0; + bool m_is_multisampled = false; + }; + + class Texture : public Image + { + public: + Texture() = default; + Texture(CPUBuffer pixels, std::uint32_t width, std::uint32_t height, VkFormat format = VK_FORMAT_R8G8B8A8_SRGB, bool is_multisampled = false) + { + Init(std::move(pixels), width, height, format, is_multisampled); + } + inline void Init(CPUBuffer pixels, std::uint32_t width, std::uint32_t height, VkFormat format = VK_FORMAT_R8G8B8A8_SRGB, bool is_multisampled = false) + { + Image::Init(ImageType::Color, width, height, format, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, is_multisampled); + Image::CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); + Image::CreateSampler(); + if(pixels) + { + TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + GPUBuffer staging_buffer; + std::size_t size = width * height * kvfFormatSize(format); + staging_buffer.Init(BufferType::Staging, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, pixels); + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + kvfCopyBufferToImage(cmd, Image::Get(), staging_buffer.Get(), staging_buffer.GetOffset(), VK_IMAGE_ASPECT_COLOR_BIT, { width, height, 1 }); + vkEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + staging_buffer.Destroy(); + } + if(!pixels) + TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + else + TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + } + ~Texture() override { Destroy(); } + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Images/Image.h b/runtime/Includes/Renderer/Images/Image.h deleted file mode 100644 index e86b12d..0000000 --- a/runtime/Includes/Renderer/Images/Image.h +++ /dev/null @@ -1,84 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Image.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/25 11:54:21 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:40:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_IMAGE__ -#define __MLX_VK_IMAGE__ - -#include -#include -#include - -namespace mlx -{ - std::uint32_t FormatSize(VkFormat format); - bool IsStencilFormat(VkFormat format); - bool IsDepthFormat(VkFormat format); - VkFormat 
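// --- Usage sketch (editor's illustration, not from the patch itself): filling a
// --- Texture from CPU pixels with the new API above. Assumes <cstring> for
// --- std::memset and that RenderCore::Get().Init() has already run; 4 bytes per
// --- texel matches the default VK_FORMAT_R8G8B8A8_SRGB.
void UploadWhiteTexture(mlx::Texture& texture, std::uint32_t width, std::uint32_t height)
{
	mlx::CPUBuffer pixels(width * height * 4);              // one RGBA8 texel = 4 bytes
	std::memset(pixels.GetData(), 0xFF, pixels.GetSize()); // opaque white
	// Init() stages the pixels through a BufferType::Staging buffer, records a
	// one-time command buffer and leaves the image in SHADER_READ_ONLY_OPTIMAL.
	texture.Init(std::move(pixels), width, height);
}
// --- end of sketch ---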
BitsToFormat(std::uint32_t bits); - VkPipelineStageFlags LayoutToAccessMask(VkImageLayout layout, bool is_destination); - - class Image : public CommandResource - { - friend class Swapchain; - - public: - Image() = default; - - inline void Create(VkImage image, VkFormat format, std::uint32_t width, std::uint32_t height, VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED) noexcept - { - m_image = image; - m_format = format; - m_width = width; - m_height = height; - m_layout = layout; - } - void Create(std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, const char* name, bool decated_memory = false); - void CreateImageView(VkImageViewType type, VkImageAspectFlags aspect_flags) noexcept; - void CreateSampler() noexcept; - void CopyFromBuffer(class Buffer& buffer); - void CopyToBuffer(class Buffer& buffer); - void TransitionLayout(VkImageLayout new_layout, NonOwningPtr cmd = nullptr); - virtual void Destroy() noexcept; - - inline VkImage Get() noexcept { return m_image; } - inline VkImage operator()() noexcept { return m_image; } - inline VkImageView GetImageView() const noexcept { return m_image_view; } - inline VkFormat GetFormat() const noexcept { return m_format; } - inline VkImageTiling GetTiling() const noexcept { return m_tiling; } - inline VkImageLayout GetLayout() const noexcept { return m_layout; } - inline VkSampler GetSampler() const noexcept { return m_sampler; } - inline std::uint32_t GetWidth() const noexcept { return m_width; } - inline std::uint32_t GetHeight() const noexcept { return m_height; } - inline bool IsInit() const noexcept { return m_image != VK_NULL_HANDLE; } - - virtual ~Image() = default; - - private: - void DestroySampler() noexcept; - void DestroyImageView() noexcept; - - private: - VmaAllocation m_allocation; - VkImage m_image = VK_NULL_HANDLE; - VkImageView m_image_view = VK_NULL_HANDLE; - VkSampler m_sampler = VK_NULL_HANDLE; - #ifdef DEBUG - std::string m_name; - #endif - VkFormat m_format; - VkImageTiling m_tiling; - VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED; - std::uint32_t m_width = 0; - std::uint32_t m_height = 0; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Images/Texture.h b/runtime/Includes/Renderer/Images/Texture.h deleted file mode 100644 index 5cc468d..0000000 --- a/runtime/Includes/Renderer/Images/Texture.h +++ /dev/null @@ -1,63 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Texture.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/08 02:24:58 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:42:41 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE__ -#define __MLX_TEXTURE__ - -#include -#include -#include -#include - -namespace mlx -{ - class Texture : public Image - { - public: - Texture() = default; - - void Create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory = false); - void Render(class Renderer& renderer, int x, int y); - void Destroy() noexcept override; - - void SetPixel(int x, int y, std::uint32_t color) noexcept; - int GetPixel(int x, int y) noexcept; - - inline void SetDescriptor(DescriptorSet&& set) noexcept { m_set = set; } - inline VkDescriptorSet GetSet() noexcept { return m_set.IsInit() ? 
m_set.Get() : VK_NULL_HANDLE; } - inline void UpdateSet(int binding) noexcept { m_set.WriteDescriptor(binding, *this); m_has_set_been_updated = true; } - inline bool HasBeenUpdated() const noexcept { return m_has_set_been_updated; } - inline constexpr void ResetUpdate() noexcept { m_has_set_been_updated = false; } - - ~Texture() = default; - - private: - void OpenCPUmap(); - - private: - ConstantVertexBuffer m_vbo; - ConstantIndexBuffer m_ibo; - #ifdef DEBUG - std::string m_name; - #endif - DescriptorSet m_set; - std::vector m_cpu_map; - std::optional m_buf_map = std::nullopt; - void* m_map = nullptr; - bool m_has_been_modified = false; - bool m_has_set_been_updated = false; - }; - - Texture* StbTextureLoad(std::filesystem::path file, int* w, int* h); -} - -#endif diff --git a/runtime/Includes/Renderer/Images/TextureAtlas.h b/runtime/Includes/Renderer/Images/TextureAtlas.h deleted file mode 100644 index 669097f..0000000 --- a/runtime/Includes/Renderer/Images/TextureAtlas.h +++ /dev/null @@ -1,43 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextureAtlas.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/07 16:36:33 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:52:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_ATLAS__ -#define __MLX_TEXTURE_ATLAS__ - -#include - -namespace mlx -{ - class TextureAtlas : public Image - { - public: - TextureAtlas() = default; - - void Create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory = false); - void Render(class Renderer& renderer, int x, int y, std::uint32_t ibo_size) const; - void Destroy() noexcept override; - - inline void SetDescriptor(DescriptorSet&& set) noexcept { m_set = set; } - inline DescriptorSet GetSet() noexcept { return m_set; } - inline void UpdateSet(int binding) noexcept { m_set.WriteDescriptor(binding, *this); m_has_been_updated = true; } - inline bool HasBeenUpdated() const noexcept { return m_has_been_updated; } - inline constexpr void ResetUpdate() noexcept { m_has_been_updated = false; } - - ~TextureAtlas() = default; - - private: - DescriptorSet m_set; - bool m_has_been_updated = false; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Images/TextureDescriptor.h b/runtime/Includes/Renderer/Images/TextureDescriptor.h deleted file mode 100644 index 7a9706b..0000000 --- a/runtime/Includes/Renderer/Images/TextureDescriptor.h +++ /dev/null @@ -1,59 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextureDescriptor.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 01:00:13 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:08:02 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_DESCRIPTOR__ -#define __MLX_TEXTURE_DESCRIPTOR__ - -#include -#include -#include - -namespace mlx -{ - struct TextureRenderDescriptor : public DrawableResource - { - NonOwningPtr texture; - int x; - int y; - - TextureRenderDescriptor(NonOwningPtr _texture, int _x, int _y) : texture(_texture), x(_x), y(_y) {} - inline bool operator==(const TextureRenderDescriptor& rhs) const { return texture == rhs.texture && x == rhs.x 
&& y == rhs.y; } - inline void Render(class Renderer& renderer) override - { - if(!texture->IsInit()) - return; - texture->Render(renderer, x, y); - } - inline void ResetUpdate() override - { - if(!texture->IsInit()) - return; - texture->ResetUpdate(); - } - }; -} - -namespace std -{ - template <> - struct hash - { - std::size_t operator()(const mlx::TextureRenderDescriptor& d) const noexcept - { - std::size_t hash = 0; - mlx::HashCombine(hash, d.texture, d.x, d.y); - return hash; - } - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Images/TextureRegistry.h b/runtime/Includes/Renderer/Images/TextureRegistry.h deleted file mode 100644 index c1248e9..0000000 --- a/runtime/Includes/Renderer/Images/TextureRegistry.h +++ /dev/null @@ -1,39 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextureRegistry.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:56:15 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:43:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_MANAGER__ -#define __MLX_TEXTURE_MANAGER__ - -#include - -namespace mlx -{ - class TextureRegistry - { - public: - TextureRegistry() = default; - - inline void Clear(); - inline std::pair, bool> RegisterTexture(NonOwningPtr texture, int x, int y); - inline bool IsTextureKnown(NonOwningPtr texture) noexcept; - inline void EraseTextures(NonOwningPtr texture); - - ~TextureRegistry() = default; - - private: - std::unordered_set m_texture_descriptors; - }; -} - -#include - -#endif diff --git a/runtime/Includes/Renderer/Images/TextureRegistry.inl b/runtime/Includes/Renderer/Images/TextureRegistry.inl deleted file mode 100644 index 5a8986b..0000000 --- a/runtime/Includes/Renderer/Images/TextureRegistry.inl +++ /dev/null @@ -1,52 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextureRegistry.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/04/23 22:08:46 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:11:09 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#pragma once -#include - -namespace mlx -{ - void TextureRegistry::Clear() - { - m_texture_descriptors.clear(); - } - - std::pair, bool> TextureRegistry::RegisterTexture(NonOwningPtr texture, int x, int y) - { - MLX_PROFILE_FUNCTION(); - auto res = m_texture_descriptors.emplace(texture, x, y); - return std::make_pair(static_cast(&const_cast(*res.first)), res.second); - } - - bool TextureRegistry::IsTextureKnown(NonOwningPtr texture) noexcept - { - MLX_PROFILE_FUNCTION(); - for(const auto& desc : m_texture_descriptors) - { - if(desc.texture == texture) - return true; - } - return false; - } - - void TextureRegistry::EraseTextures(NonOwningPtr texture) - { - MLX_PROFILE_FUNCTION(); - for(auto it = m_texture_descriptors.begin(); it != m_texture_descriptors.end();) - { - if(it->texture == texture) - it = m_texture_descriptors.erase(it); - else - ++it; - } - } -} diff --git a/runtime/Includes/Renderer/Core/Memory.h b/runtime/Includes/Renderer/Memory.h similarity index 50% rename from runtime/Includes/Renderer/Core/Memory.h rename to runtime/Includes/Renderer/Memory.h index a29be0b..2470630 100644 --- a/runtime/Includes/Renderer/Core/Memory.h +++ 
b/runtime/Includes/Renderer/Memory.h @@ -1,24 +1,12 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Memory.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/10/20 02:13:03 by maldavid #+# #+# */ -/* Updated: 2024/03/27 22:49:57 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_VK_MEMORY__ #define __MLX_VK_MEMORY__ namespace mlx { - class GPUallocator + class GPUAllocator { public: - GPUallocator() = default; + GPUAllocator() = default; void Init() noexcept; void Destroy() noexcept; @@ -36,7 +24,7 @@ namespace mlx void Flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept; - ~GPUallocator() = default; + ~GPUAllocator() = default; private: VmaAllocator m_allocator; diff --git a/runtime/Includes/Renderer/Pipelines/Graphics.h b/runtime/Includes/Renderer/Pipelines/Graphics.h new file mode 100644 index 0000000..027aaf4 --- /dev/null +++ b/runtime/Includes/Renderer/Pipelines/Graphics.h @@ -0,0 +1,57 @@ +#ifndef __MLX_GRAPHICS_PIPELINE__ +#define __MLX_GRAPHICS_PIPELINE__ + +#include +#include +#include +#include + +namespace mlx +{ + struct GraphicPipelineDescriptor + { + std::shared_ptr vertex_shader; + std::shared_ptr fragment_shader; + std::vector> color_attachments; + NonOwningPtr renderer = nullptr; + bool clear_color_attachments = true; + bool no_vertex_inputs = false; + }; + + class GraphicPipeline : public Pipeline + { + public: + GraphicPipeline() = default; + + void Init(const GraphicPipelineDescriptor& descriptor); + bool BindPipeline(VkCommandBuffer command_buffer, std::size_t framebuffer_index, std::array clear) noexcept; + void EndPipeline(VkCommandBuffer command_buffer) noexcept override; + void Destroy() noexcept; + + [[nodiscard]] inline VkPipeline GetPipeline() const override { return m_pipeline; } + [[nodiscard]] inline VkPipelineLayout GetPipelineLayout() const override { return m_pipeline_layout; } + [[nodiscard]] inline VkPipelineBindPoint GetPipelineBindPoint() const override { return VK_PIPELINE_BIND_POINT_GRAPHICS; } + + ~GraphicPipeline() = default; + + private: + void CreateFramebuffers(const std::vector>& render_targets, bool clear_attachments); + void TransitionAttachments(VkCommandBuffer cmd = VK_NULL_HANDLE); + + // Private override to remove access + bool BindPipeline(VkCommandBuffer) noexcept override { return false; }; + + private: + std::vector> m_attachments; + std::vector m_framebuffers; + std::vector m_clears; + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + VkRenderPass m_renderpass = VK_NULL_HANDLE; + VkPipeline m_pipeline = VK_NULL_HANDLE; + VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE; + NonOwningPtr p_renderer; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Pipelines/Pipeline.h b/runtime/Includes/Renderer/Pipelines/Pipeline.h index 7cd8ffd..126fb34 100644 --- a/runtime/Includes/Renderer/Pipelines/Pipeline.h +++ b/runtime/Includes/Renderer/Pipelines/Pipeline.h @@ -1,36 +1,23 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Pipeline.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 21:23:52 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:15:38 by maldavid ### ########.fr */ -/* */ -/* 
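// --- Usage sketch (editor's illustration): wiring a GraphicPipeline from the
// --- descriptor above. The element type of color_attachments is assumed to be
// --- NonOwningPtr<Texture> (the excerpt dropped the template arguments).
void BuildPipeline(mlx::GraphicPipeline& pipeline, mlx::Renderer& renderer,
                   std::shared_ptr<mlx::Shader> vertex_shader,
                   std::shared_ptr<mlx::Shader> fragment_shader,
                   mlx::Texture& render_target)
{
	mlx::GraphicPipelineDescriptor descriptor;
	descriptor.vertex_shader = std::move(vertex_shader);
	descriptor.fragment_shader = std::move(fragment_shader);
	descriptor.color_attachments.push_back(&render_target); // texture rendered into
	descriptor.renderer = &renderer;
	descriptor.clear_color_attachments = true; // clear the targets when the pass begins
	pipeline.Init(descriptor);
}
// --- end of sketch ---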
************************************************************************** */ +#ifndef __MLX_PIPELINE__ +#define __MLX_PIPELINE__ -#ifndef __PIPELINE__ -#define __PIPELINE__ - -#include +#include namespace mlx { - class GraphicPipeline + class Pipeline { public: - void init(class Renderer& renderer); - void Destroy() noexcept; + Pipeline() = default; - inline void BindPipeline(CommandBuffer& command_buffer) noexcept { vkCmdBindPipeline(command_buffer.Get(), VK_PIPELINE_BIND_POINT_GRAPHICS, m_graphics_pipeline); } + inline virtual bool BindPipeline(VkCommandBuffer command_buffer) noexcept { vkCmdBindPipeline(command_buffer, GetPipelineBindPoint(), GetPipeline()); return true; } + inline virtual void EndPipeline([[maybe_unused]] VkCommandBuffer command_buffer) noexcept {} - inline const VkPipeline& GetPipeline() const noexcept { return m_graphics_pipeline; } - inline const VkPipelineLayout& GetPipelineLayout() const noexcept { return m_pipeline_layout; } + virtual VkPipeline GetPipeline() const = 0; + virtual VkPipelineLayout GetPipelineLayout() const = 0; + virtual VkPipelineBindPoint GetPipelineBindPoint() const = 0; - private: - VkPipeline m_graphics_pipeline = VK_NULL_HANDLE; - VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE; + virtual ~Pipeline() = default; }; } diff --git a/runtime/Includes/Renderer/Pipelines/Shader.h b/runtime/Includes/Renderer/Pipelines/Shader.h new file mode 100644 index 0000000..d1c59db --- /dev/null +++ b/runtime/Includes/Renderer/Pipelines/Shader.h @@ -0,0 +1,68 @@ +#ifndef __MLX_SHADER__ +#define __MLX_SHADER__ + +namespace mlx +{ + struct ShaderSetLayout + { + std::vector > binds; + + ShaderSetLayout(std::vector > b) : binds(std::move(b)) {} + }; + + struct ShaderPushConstantLayout + { + std::size_t offset; + std::size_t size; + + ShaderPushConstantLayout(std::size_t o, std::size_t s) : offset(o), size(s) {} + }; + + struct ShaderLayout + { + std::vector > set_layouts; + std::vector push_constants; + + ShaderLayout(std::vector > s, std::vector pc) : set_layouts(std::move(s)), push_constants(std::move(pc)) {} + }; + + enum class ShaderType + { + Vertex, + Fragment, + Compute + }; + + struct ShaderPipelineLayoutPart + { + std::vector push_constants; + std::vector set_layouts; + }; + + class Shader + { + public: + Shader(const std::vector& bytecode, ShaderType type, ShaderLayout layout); + + [[nodiscard]] inline const ShaderLayout& GetShaderLayout() const { return m_layout; } + [[nodiscard]] inline const std::vector& GetByteCode() const noexcept { return m_bytecode; } + [[nodiscard]] inline const ShaderPipelineLayoutPart& GetPipelineLayout() const noexcept { return m_pipeline_layout_part; } + [[nodiscard]] inline VkShaderModule GetShaderModule() const noexcept { return m_module; } + [[nodiscard]] inline VkShaderStageFlagBits GetShaderStage() const noexcept { return m_stage; } + + ~Shader(); + + private: + void GeneratePipelineLayout(ShaderLayout layout); + + private: + ShaderLayout m_layout; + ShaderPipelineLayoutPart m_pipeline_layout_part; + std::vector m_bytecode; + std::vector m_set_layouts; + VkShaderStageFlagBits m_stage; + VkShaderModule m_module = VK_NULL_HANDLE; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/PixelPut.h b/runtime/Includes/Renderer/PixelPut.h deleted file mode 100644 index c16d7c9..0000000 --- a/runtime/Includes/Renderer/PixelPut.h +++ /dev/null @@ -1,48 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* PixelPut.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: 
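// --- Usage sketch (editor's illustration) for the Shader layout structs above.
// --- The dropped template arguments are assumed to be std::pair<int,
// --- VkDescriptorType> for ShaderSetLayout::binds, std::pair<int,
// --- ShaderSetLayout> for ShaderLayout::set_layouts, and std::uint32_t SPIR-V
// --- words for the bytecode.
std::shared_ptr<mlx::Shader> BuildVertexShader(const std::vector<std::uint32_t>& spirv)
{
	// Set 0, binding 0: one uniform buffer (e.g. the ViewerData matrix).
	mlx::ShaderSetLayout set_layout({ { 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER } });
	mlx::ShaderLayout layout({ { 0, set_layout } }, /* no push constants */ {});
	return std::make_shared<mlx::Shader>(spirv, mlx::ShaderType::Vertex, std::move(layout));
}
// --- end of sketch ---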
maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 13:18:50 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:46:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_PIXEL_PUT__ -#define __MLX_PIXEL_PUT__ - -#include -#include - -namespace mlx -{ - class PixelPutPipeline - { - public: - PixelPutPipeline() = default; - - void Init(std::uint32_t width, std::uint32_t height, class Renderer& renderer) noexcept; - - void SetPixel(int x, int y, std::uint32_t color) noexcept; - void Render(class Renderer& renderer) noexcept; - - void Clear(); - void Destroy() noexcept; - - ~PixelPutPipeline() = default; - - private: - Texture m_texture; - Buffer m_buffer; - // using vector as CPU map and not directly writting to mapped buffer to improve performances - std::vector m_cpu_map; - void* m_buffer_map = nullptr; - std::uint32_t m_width = 0; - std::uint32_t m_height = 0; - bool m_has_been_modified = true; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/RenderCore.h b/runtime/Includes/Renderer/RenderCore.h new file mode 100644 index 0000000..01218a3 --- /dev/null +++ b/runtime/Includes/Renderer/RenderCore.h @@ -0,0 +1,39 @@ +#ifndef __MLX_RENDER_CORE__ +#define __MLX_RENDER_CORE__ + +#include + +namespace mlx +{ + constexpr const int MAX_FRAMES_IN_FLIGHT = 3; + + class RenderCore : public Singleton + { + friend class Singleton; + + public: + void Init() noexcept; + void Destroy() noexcept; + + [[nodiscard]] MLX_FORCEINLINE VkInstance GetInstance() const noexcept { return m_instance; } + [[nodiscard]] MLX_FORCEINLINE VkInstance& GetInstanceRef() noexcept { return m_instance; } + [[nodiscard]] MLX_FORCEINLINE VkDevice GetDevice() const noexcept { return m_device; } + [[nodiscard]] MLX_FORCEINLINE VkPhysicalDevice GetPhysicalDevice() const noexcept { return m_physical_device; } + [[nodiscard]] MLX_FORCEINLINE GPUAllocator& GetAllocator() noexcept { return m_allocator; } + + + inline void WaitDeviceIdle() const noexcept { vkDeviceWaitIdle(m_device); } + + private: + RenderCore() = default; + ~RenderCore() = default; + + private: + GPUAllocator m_allocator; + VkInstance m_instance = VK_NULL_HANDLE; + VkDevice m_device = VK_NULL_HANDLE; + VkPhysicalDevice m_physical_device = VK_NULL_HANDLE; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/2DPass.h b/runtime/Includes/Renderer/RenderPasses/2DPass.h new file mode 100644 index 0000000..eef1282 --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/2DPass.h @@ -0,0 +1,29 @@ +#ifndef __MLX_2D_PASS__ +#define __MLX_2D_PASS__ + +#include +#include +#include + +namespace mlx +{ + class Render2DPass + { + public: + Render2DPass() = default; + void Init(); + void Pass(class Scene& scene, class Renderer& renderer, class Texture& render_target); + void Destroy(); + ~Render2DPass() = default; + + private: + GraphicPipeline m_pipeline; + std::shared_ptr p_viewer_data_set; + std::shared_ptr p_viewer_data_buffer; + std::shared_ptr p_texture_set; + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/FinalPass.h b/runtime/Includes/Renderer/RenderPasses/FinalPass.h new file mode 100644 index 0000000..93a71cf --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/FinalPass.h @@ -0,0 +1,27 @@ +#ifndef __MLX_FINAL_PASS__ +#define __MLX_FINAL_PASS__ + +#include +#include +#include + +namespace mlx +{ + class FinalPass + { + public: + FinalPass() = default; 
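// --- Usage sketch (editor's illustration): RenderCore lifetime, as implied by
// --- its Singleton base and by calls such as RenderCore::Get().GetDevice()
// --- elsewhere in this patch.
mlx::RenderCore::Get().Init();           // create instance, device and allocator
// ... create renderers, scenes and textures, run the application ...
mlx::RenderCore::Get().WaitDeviceIdle(); // drain the GPU before tearing anything down
mlx::RenderCore::Get().Destroy();
// --- end of sketch ---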
+ void Init(); + void Pass(class Scene& scene, class Renderer& renderer, class Texture& render_target); + void Destroy(); + ~FinalPass() = default; + + private: + GraphicPipeline m_pipeline; + std::shared_ptr p_set; + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/Passes.h b/runtime/Includes/Renderer/RenderPasses/Passes.h new file mode 100644 index 0000000..7800912 --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/Passes.h @@ -0,0 +1,26 @@ +#ifndef __MLX_PASSES__ +#define __MLX_PASSES__ + +#include +#include +#include + +namespace mlx +{ + class RenderPasses + { + public: + RenderPasses() = default; + void Init(); + void Pass(class Scene& scene, class Renderer& renderer); + void Destroy(); + ~RenderPasses() = default; + + private: + Render2DPass m_2Dpass; + FinalPass m_final; + Texture m_main_render_texture; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Renderer.h b/runtime/Includes/Renderer/Renderer.h index a7ff20c..9502fbe 100644 --- a/runtime/Includes/Renderer/Renderer.h +++ b/runtime/Includes/Renderer/Renderer.h @@ -1,31 +1,11 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Renderer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 17:14:45 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:40:20 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ +#ifndef __MLX_RENDERER__ +#define __MLX_RENDERER__ -#ifndef __RENDERER__ -#define __RENDERER__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include namespace mlx { @@ -34,57 +14,48 @@ namespace mlx public: Renderer() = default; - void Init(NonOwningPtr render_target); + void Init(NonOwningPtr window); bool BeginFrame(); void EndFrame(); - void Destroy(); + [[nodiscard]] inline VkSwapchainKHR GetSwapchain() const noexcept { return m_swapchain; } + [[nodiscard]] inline VkSurfaceKHR GetSurface() const noexcept { return m_surface; } + [[nodiscard]] inline VkSemaphore GetImageAvailableSemaphore(int index) const noexcept { return m_image_available_semaphores[index]; } + [[nodiscard]] inline VkSemaphore GetRenderFinishedSemaphore(int index) const noexcept { return m_render_finished_semaphores[index]; } + [[nodiscard]] inline VkCommandBuffer GetCommandBuffer(int index) const noexcept { return m_cmd_buffers[index]; } + [[nodiscard]] inline VkCommandBuffer GetActiveCommandBuffer() const noexcept { return m_cmd_buffers[m_current_frame_index]; } + [[nodiscard]] inline const std::vector& GetSwapchainImages() const { return m_swapchain_images; } + [[nodiscard]] inline std::size_t& GetDrawCallsCounterRef() noexcept { return m_drawcalls; } + [[nodiscard]] inline std::size_t& GetPolygonDrawnCounterRef() noexcept { return m_polygons_drawn; } + [[nodiscard]] inline std::size_t GetSwapchainImageIndex() const noexcept { return m_swapchain_image_index; } + [[nodiscard]] inline std::size_t GetCurrentFrameIndex() const noexcept { return m_current_frame_index; } + [[nodiscard]] inline NonOwningPtr GetWindow() const noexcept { return p_window; } - inline NonOwningPtr GetWindow() { return m_window; } - inline void SetWindow(NonOwningPtr window) { m_window = window; } + MLX_FORCEINLINE constexpr void RequireFramebufferResize()
noexcept { m_framebuffers_resize = true; } - inline Surface& GetSurface() noexcept { return m_surface; } - inline NonOwningPtr GetUniformBuffer() noexcept { return m_uniform_buffer.get(); } - inline SwapChain& GetSwapChain() noexcept { return m_swapchain; } - inline RenderPass& GetRenderPass() noexcept { return m_pass; } - inline GraphicPipeline& GetPipeline() noexcept { return m_pipeline; } - inline CommandBuffer& GetCmdBuffer(int i) noexcept { return m_cmd.GetCmdBuffer(i); } - inline CommandBuffer& GetActiveCmdBuffer() noexcept { return m_cmd.GetCmdBuffer(m_current_frame_index); } - inline FrameBuffer& GetFrameBuffer(int i) noexcept { return m_framebuffers[i]; } - inline DescriptorSet& GetVertDescriptorSet() noexcept { return m_vert_set; } - inline DescriptorSet& GetFragDescriptorSet() noexcept { return m_frag_set; } - inline std::uint32_t GetActiveImageIndex() noexcept { return m_current_frame_index; } - inline std::uint32_t GetImageIndex() noexcept { return m_image_index; } - - constexpr inline void RequireFrameBufferResize() noexcept { m_framebuffer_resized = true; } + void Destroy() noexcept; ~Renderer() = default; private: - void RecreateRenderData(); + void CreateSwapchain(); + void DestroySwapchain(); private: - GraphicPipeline m_pipeline; - CommandManager m_cmd; - RenderPass m_pass; - Surface m_surface; - SwapChain m_swapchain; - std::array m_render_finished_semaphores; - std::array m_image_available_semaphores; - std::vector m_framebuffers; - - DescriptorSet m_vert_set; - DescriptorSet m_frag_set; - - std::unique_ptr m_uniform_buffer; - - NonOwningPtr m_window; - NonOwningPtr m_render_target; - + std::array m_image_available_semaphores; + std::array m_render_finished_semaphores; + std::array m_cmd_buffers; + std::array m_cmd_fences; + std::vector m_swapchain_images; + NonOwningPtr p_window; + VkSurfaceKHR m_surface = VK_NULL_HANDLE; + VkSwapchainKHR m_swapchain = VK_NULL_HANDLE; std::uint32_t m_current_frame_index = 0; - std::uint32_t m_image_index = 0; - bool m_framebuffer_resized = false; + std::uint32_t m_swapchain_image_index = 0; + std::size_t m_drawcalls = 0; + std::size_t m_polygons_drawn = 0; + bool m_framebuffers_resize = false; }; } diff --git a/runtime/Includes/Renderer/Renderpass/FrameBuffer.h b/runtime/Includes/Renderer/Renderpass/FrameBuffer.h deleted file mode 100644 index ec6abe5..0000000 --- a/runtime/Includes/Renderer/Renderpass/FrameBuffer.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* FrameBuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:19:44 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:16:02 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_FRAMEBUFFER__ -#define __MLX_VK_FRAMEBUFFER__ - -namespace mlx -{ - class FrameBuffer - { - public: - void Init(class RenderPass& renderpass, class Image& image); - void Destroy() noexcept; - - inline VkFramebuffer& operator()() noexcept { return m_framebuffer; } - inline VkFramebuffer& Get() noexcept { return m_framebuffer; } - inline std::uint32_t GetWidth() const noexcept { return m_width; } - inline std::uint32_t GetHeight() const noexcept { return m_height; } - - private: - VkFramebuffer m_framebuffer = VK_NULL_HANDLE; - std::uint32_t m_width = 0; - std::uint32_t m_height = 0; - }; -} - -#endif // __MLX_VK_FRAMEBUFFER__ diff --git 
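// --- Usage sketch (editor's illustration) of the frame loop implied by the
// --- Renderer API above. The stripped template argument of Init() is assumed
// --- to be NonOwningPtr<Window>, and BeginFrame() is assumed to return false
// --- when the swapchain is out of date (hence its bool return type).
void RunFrame(mlx::Renderer& renderer)
{
	if(!renderer.BeginFrame())
		return; // swapchain recreation in progress, skip this frame
	VkCommandBuffer cmd = renderer.GetActiveCommandBuffer();
	// ... record draw commands into cmd ...
	renderer.EndFrame(); // submit and present
}
// --- end of sketch ---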
a/runtime/Includes/Renderer/Renderpass/RenderPass.h b/runtime/Includes/Renderer/Renderpass/RenderPass.h deleted file mode 100644 index 18c6c03..0000000 --- a/runtime/Includes/Renderer/Renderpass/RenderPass.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* RenderPass.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:22:00 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:16:44 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_RENDER_PASS__ -#define __MLX_VK_RENDER_PASS__ - -namespace mlx -{ - class RenderPass - { - public: - void Init(VkFormat attachement_format, VkImageLayout layout); - void Destroy() noexcept; - - void Begin(class CmommandBuffer& cmd, class FrameBuffer& fb); - void End(class CommandBuffer& cmd); - - inline VkRenderPass& operator()() noexcept { return m_render_pass; } - inline VkRenderPass& Get() noexcept { return m_render_pass; } - - private: - VkRenderPass m_render_pass = VK_NULL_HANDLE; - bool m_is_running = false; - }; -} - -#endif // __MLX_VK_RENDER_PASS__ diff --git a/runtime/Includes/Renderer/Renderpass/Swapchain.h b/runtime/Includes/Renderer/Renderpass/Swapchain.h deleted file mode 100644 index f9c8054..0000000 --- a/runtime/Includes/Renderer/Renderpass/Swapchain.h +++ /dev/null @@ -1,65 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Swapchain.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:23:27 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:18:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_SWAPCHAIN__ -#define __MLX_VK_SWAPCHAIN__ - -#include - -namespace mlx -{ - class SwapChain - { - friend class GraphicPipeline; - friend class RenderPass; - friend class Renderer; - - public: - struct SwapChainSupportDetails - { - VkSurfaceCapabilitiesKHR capabilities; - std::vector formats; - std::vector present_modes; - }; - - public: - SwapChain() = default; - - void Init(NonOwningPtr renderer); - void Recreate(); - void Destroy() noexcept; - - SwapChainSupportDetails QuerySwapChainSupport(VkPhysicalDevice device); - VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities); - VkPresentModeKHR ChooseSwapPresentMode([[maybe_unused]] const std::vector &available_present_modes); - - inline VkSwapchainKHR Get() noexcept { return m_swapchain; } - inline VkSwapchainKHR operator()() noexcept { return m_swapchain; } - inline std::size_t GetImagesNumber() const noexcept { return m_images.size(); } - inline Image& GetImage(std::size_t i) noexcept { return m_images[i]; } - inline SwapChainSupportDetails GetSupport() noexcept { return m_swapchain_support; } - inline VkExtent2D GetExtent() noexcept { return m_extent; } - inline VkFormat GetImagesFormat() const noexcept { return m_swapchain_image_format; } - - ~SwapChain() = default; - - private: - SwapChainSupportDetails m_swapchain_support; - VkSwapchainKHR m_swapchain; - std::vector m_images; - VkFormat m_swapchain_image_format; - VkExtent2D m_extent; - NonOwningPtr m_renderer; - }; -} - -#endif // __MLX_VK_SWAPCHAIN__ diff --git a/runtime/Includes/Renderer/ScenesRenderer.h b/runtime/Includes/Renderer/ScenesRenderer.h new file mode 
100644 index 0000000..03c391e --- /dev/null +++ b/runtime/Includes/Renderer/ScenesRenderer.h @@ -0,0 +1,22 @@ +#ifndef __MLX_SCENES_RENDERER__ +#define __MLX_SCENES_RENDERER__ + +#include + +namespace mlx +{ + class SceneRenderer + { + public: + SceneRenderer() = default; + void Init(); + void Render(class Scene& scene, class Renderer& renderer); // TODO : add RTT support + void Destroy(); + ~SceneRenderer() = default; + + private: + RenderPasses m_passes; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Texts/Font.h b/runtime/Includes/Renderer/Texts/Font.h deleted file mode 100644 index 91b6c4c..0000000 --- a/runtime/Includes/Renderer/Texts/Font.h +++ /dev/null @@ -1,55 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Font.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/11 21:17:04 by kbz_8 #+# #+# */ -/* Updated: 2024/07/05 13:53:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_FONT__ -#define __MLX_FONT__ - -#include -#include - -namespace mlx -{ - class Font - { - friend class FontLibrary; - - public: - Font() = delete; - Font(class Renderer& renderer, const std::filesystem::path& path, float scale); - Font(class Renderer& renderer, const std::string& name, const std::vector& ttf_data, float scale); - - inline const std::string& GetName() const { return m_name; } - inline float GetScale() const noexcept { return m_scale; } - inline const std::array& GetCharData() const { return m_cdata; } - inline const TextureAtlas& GetAtlas() const noexcept { return m_atlas; } - inline bool operator==(const Font& rhs) const { return rhs.m_name == m_name && rhs.m_scale == m_scale; } - inline bool operator!=(const Font& rhs) const { return rhs.m_name != m_name || rhs.m_scale != m_scale; } - - void Destroy(); - - ~Font(); - - private: - void BuildFont(); - - private: - std::array m_cdata; - TextureAtlas m_atlas; - std::variant> m_build_data; - std::string m_name; - class Renderer& m_renderer; - float m_scale = 0; - bool m_is_init = false; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Texts/FontLibrary.h b/runtime/Includes/Renderer/Texts/FontLibrary.h deleted file mode 100644 index 433c5f5..0000000 --- a/runtime/Includes/Renderer/Texts/FontLibrary.h +++ /dev/null @@ -1,47 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* FontLibrary.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 09:26:03 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:21:53 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_FONT_LIBRARY__ -#define __MLX_FONT_LIBRARY__ - -#include -#include -#include - -namespace mlx -{ - using FontID = std::uint32_t; - constexpr FontID nullfont = 0; - - class FontLibrary : public Singleton - { - friend class Singleton; - - public: - std::shared_ptr GetFontData(FontID id); - FontID AddFontToLibrary(std::shared_ptr font); - void RemoveFontFromLibrary(FontID id); - - void ClearLibrary(); - - private: - FontLibrary() = default; - ~FontLibrary() = default; - - private: - std::unordered_map> m_cache; - std::vector m_invalid_ids; - FontID m_current_id = 1; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Texts/Text.h 
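// --- Usage sketch (editor's illustration): per-frame flow implied by
// --- SceneRenderer and the RenderPasses list above (2D pass into
// --- m_main_render_texture, then FinalPass onto the swapchain image).
mlx::SceneRenderer scene_renderer;
scene_renderer.Init(); // builds the pass list and the main render texture
// each frame, between Renderer::BeginFrame() and Renderer::EndFrame():
scene_renderer.Render(scene, renderer); // Render2DPass, then FinalPass
// at shutdown:
scene_renderer.Destroy();
// --- end of sketch ---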
b/runtime/Includes/Renderer/Texts/Text.h deleted file mode 100644 index 07b6d10..0000000 --- a/runtime/Includes/Renderer/Texts/Text.h +++ /dev/null @@ -1,49 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Text.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:09:04 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:23:50 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT__ -#define __MLX_TEXT__ - -#include -#include -#include -#include - -namespace mlx -{ - class Text - { - public: - Text() = default; - - void Init(std::string text, FontID font, std::uint32_t color, std::vector vbo_data, std::vector ibo_data); - void Bind(class Renderer& renderer) noexcept; - inline FontID GetFontInUse() const noexcept { return m_font; } - void UpdateVertexData(int frame, std::vector vbo_data); - inline std::uint32_t GetIBOsize() noexcept { return m_ibo.GetSize(); } - inline const std::string& GetText() const { return m_text; } - inline std::uint32_t GetColor() const noexcept { return m_color; } - void Destroy() noexcept; - - ~Text(); - - private: - std::array m_vbo; - ConstantIndexBuffer m_ibo; - std::string m_text; - std::uint32_t m_color; - FontID m_font = nullfont; - bool m_is_init = false; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Texts/TextDescriptor.h b/runtime/Includes/Renderer/Texts/TextDescriptor.h deleted file mode 100644 index 4b5f72a..0000000 --- a/runtime/Includes/Renderer/Texts/TextDescriptor.h +++ /dev/null @@ -1,62 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextDescriptor.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:13:34 by maldavid #+# #+# */ -/* Updated: 2024/07/05 14:03:43 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_DESCRIPTOR__ -#define __MLX_TEXT_DESCRIPTOR__ - -#include -#include -#include -#include - -namespace mlx -{ - class TextDrawDescriptor : public DrawableResource - { - friend class std::hash; - - public: - TextID id; - std::uint32_t color; - int x; - int y; - - public: - TextDrawDescriptor(std::string text, std::uint32_t _color, int _x, int _y); - - void Init(FontID font) noexcept; - bool operator==(const TextDrawDescriptor& rhs) const { return m_text == rhs.m_text && x == rhs.x && y == rhs.y && color == rhs.color; } - void Render(Renderer& renderer) override; - void ResetUpdate() override; - - TextDrawDescriptor() = default; - - private: - std::string m_text; - }; -} - -namespace std -{ - template <> - struct hash - { - std::size_t operator()(const mlx::TextDrawDescriptor& d) const noexcept - { - std::size_t hash = 0; - mlx::HashCombine(hash, d.x, d.y, d.color, d.m_text); - return hash; - } - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Texts/TextLibrary.h b/runtime/Includes/Renderer/Texts/TextLibrary.h deleted file mode 100644 index 63385ea..0000000 --- a/runtime/Includes/Renderer/Texts/TextLibrary.h +++ /dev/null @@ -1,48 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextLibrary.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/10 11:52:30 by 
maldavid #+# #+# */ -/* Updated: 2024/03/28 22:26:10 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_LIBRARY__ -#define __MLX_TEXT_LIBRARY__ - -#include -#include -#include -#include -#include - -namespace mlx -{ - using TextID = std::uint32_t; - constexpr TextID nulltext = 0; - - class TextLibrary : public Singleton - { - friend class Singleton; - - public: - std::shared_ptr GetTextData(TextID id); - TextID AddTextToLibrary(std::shared_ptr text); - void RemoveTextFromLibrary(TextID id); - - void ClearLibrary(); - - private: - TextLibrary() = default; - ~TextLibrary() = default; - - private: - std::unordered_map> m_cache; - TextID m_current_id = 1; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Texts/TextManager.h b/runtime/Includes/Renderer/Texts/TextManager.h deleted file mode 100644 index a2d57db..0000000 --- a/runtime/Includes/Renderer/Texts/TextManager.h +++ /dev/null @@ -1,43 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextManager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/06 16:24:11 by maldavid #+# #+# */ -/* Updated: 2024/03/28 22:27:32 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_MANAGER__ -#define __MLX_TEXT_MANAGER__ - -#include -#include -#include -#include -#include - -namespace mlx -{ - class TextManager - { - public: - TextManager() = default; - - void Init(Renderer& renderer) noexcept; - std::pair, bool> RegisterText(int x, int y, std::uint32_t color, std::string str); - inline void Clear() { m_text_descriptors.clear(); } - void LoadFont(Renderer& renderer, const std::filesystem::path& filepath, float scale); - void Destroy() noexcept; - - ~TextManager() = default; - - private: - std::unordered_set m_text_descriptors; - FontID m_font_in_use = nullfont; - }; -} - -#endif diff --git a/runtime/Includes/Renderer/Vertex.h b/runtime/Includes/Renderer/Vertex.h index 285825e..bcc501e 100644 --- a/runtime/Includes/Renderer/Vertex.h +++ b/runtime/Includes/Renderer/Vertex.h @@ -1,60 +1,25 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Vertex.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/04/23 22:24:33 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:25:01 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ +#ifndef __MLX_VERTEX__ +#define __MLX_VERTEX__ -#ifndef __MLX_RENDERER_VERTEX__ -#define __MLX_RENDERER_VERTEX__ +#include +#include namespace mlx { struct Vertex { - glm::vec2 pos; - glm::vec4 color; - glm::vec2 uv; + alignas(16) Vec2f position = Vec2f{ 0.0f, 0.0f }; + alignas(16) Vec4f color = Vec4f{ 1.0f, 1.0f, 1.0f, 1.0f }; + alignas(16) Vec2f uv = Vec2f{ 0.0f, 0.0f }; - Vertex(glm::vec2 _pos, glm::vec4 _color, glm::vec2 _uv) : pos(std::move(_pos)), color(std::move(_color)), uv(std::move(_uv)) {} + Vertex() = default; + Vertex(Vec2f p, Vec4f c, Vec2f u) : position(std::move(p)), color(std::move(c)), uv(std::move(u)) {} - static VkVertexInputBindingDescription GetBindingDescription() - { - VkVertexInputBindingDescription binding_description{}; - binding_description.binding = 0; - binding_description.stride = sizeof(Vertex); -
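// --- Usage sketch (editor's illustration): plugging the Vertex description
// --- above into standard Vulkan vertex-input state (plain Vulkan API, nothing
// --- MLX-specific assumed).
auto binding = mlx::Vertex::GetBindingDescription();
auto attributes = mlx::Vertex::GetAttributeDescriptions();

VkPipelineVertexInputStateCreateInfo vertex_input{};
vertex_input.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input.vertexBindingDescriptionCount = 1;
vertex_input.pVertexBindingDescriptions = &binding;
vertex_input.vertexAttributeDescriptionCount = static_cast<std::uint32_t>(attributes.size());
vertex_input.pVertexAttributeDescriptions = attributes.data();
// --- end of sketch ---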
binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; - - return binding_description; - } - - static std::array GetAttributeDescriptions() - { - std::array attribute_descriptions; - - attribute_descriptions[0].binding = 0; - attribute_descriptions[0].location = 0; - attribute_descriptions[0].format = VK_FORMAT_R32G32_SFLOAT; - attribute_descriptions[0].offset = offsetof(Vertex, pos); - - attribute_descriptions[1].binding = 0; - attribute_descriptions[1].location = 1; - attribute_descriptions[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; - attribute_descriptions[1].offset = offsetof(Vertex, color); - - attribute_descriptions[2].binding = 0; - attribute_descriptions[2].location = 2; - attribute_descriptions[2].format = VK_FORMAT_R32G32_SFLOAT; - attribute_descriptions[2].offset = offsetof(Vertex, uv); - - return attribute_descriptions; - } + [[nodiscard]] inline static VkVertexInputBindingDescription GetBindingDescription(); + [[nodiscard]] inline static std::array GetAttributeDescriptions(); }; } +#include + #endif diff --git a/runtime/Includes/Renderer/Vertex.inl b/runtime/Includes/Renderer/Vertex.inl new file mode 100644 index 0000000..0c6b9ea --- /dev/null +++ b/runtime/Includes/Renderer/Vertex.inl @@ -0,0 +1,36 @@ +#pragma once +#include + +namespace mlx +{ + VkVertexInputBindingDescription Vertex::GetBindingDescription() + { + VkVertexInputBindingDescription binding_description{}; + binding_description.binding = 0; + binding_description.stride = sizeof(Vertex); + binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + return binding_description; + } + + std::array Vertex::GetAttributeDescriptions() + { + std::array attribute_descriptions; + + attribute_descriptions[0].binding = 0; + attribute_descriptions[0].location = 0; + attribute_descriptions[0].format = VK_FORMAT_R32G32_SFLOAT; + attribute_descriptions[0].offset = offsetof(Vertex, position); + + attribute_descriptions[1].binding = 0; + attribute_descriptions[1].location = 1; + attribute_descriptions[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; + attribute_descriptions[1].offset = offsetof(Vertex, color); + + attribute_descriptions[2].binding = 0; + attribute_descriptions[2].location = 2; + attribute_descriptions[2].format = VK_FORMAT_R32G32_SFLOAT; + attribute_descriptions[2].offset = offsetof(Vertex, uv); + + return attribute_descriptions; + } +} diff --git a/runtime/Includes/Renderer/ViewerData.h b/runtime/Includes/Renderer/ViewerData.h new file mode 100644 index 0000000..e9685b6 --- /dev/null +++ b/runtime/Includes/Renderer/ViewerData.h @@ -0,0 +1,14 @@ +#ifndef __MLX_VIEWER_DATA__ +#define __MLX_VIEWER_DATA__ + +#include + +namespace mlx +{ + struct ViewerData + { + Mat4f projection_matrix; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Vulkan/VulkanPrototypes.h b/runtime/Includes/Renderer/Vulkan/VulkanPrototypes.h new file mode 100644 index 0000000..fd96c65 --- /dev/null +++ b/runtime/Includes/Renderer/Vulkan/VulkanPrototypes.h @@ -0,0 +1,170 @@ +#ifndef __SCOP_VK_PROTOTYPES__ +#define __SCOP_VK_PROTOTYPES__ + +#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES) + #error "define VK_NO_PROTOTYPES needed" +#endif + +#ifndef VK_NO_PROTOTYPES + #define VK_NO_PROTOTYPES +#endif + +#ifndef VULKAN_H_ + #include +#endif + +#if defined(VK_VERSION_1_0) + extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; + extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; + extern PFN_vkAllocateMemory vkAllocateMemory; + extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; + extern PFN_vkBindBufferMemory 
vkBindBufferMemory; + extern PFN_vkBindImageMemory vkBindImageMemory; + extern PFN_vkCmdBeginQuery vkCmdBeginQuery; + extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; + extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; + extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; + extern PFN_vkCmdBindPipeline vkCmdBindPipeline; + extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; + extern PFN_vkCmdBlitImage vkCmdBlitImage; + extern PFN_vkCmdClearAttachments vkCmdClearAttachments; + extern PFN_vkCmdClearColorImage vkCmdClearColorImage; + extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; + extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; + extern PFN_vkCmdCopyImage vkCmdCopyImage; + extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; + extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; + extern PFN_vkCmdDispatch vkCmdDispatch; + extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; + extern PFN_vkCmdDraw vkCmdDraw; + extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed; + extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; + extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect; + extern PFN_vkCmdEndQuery vkCmdEndQuery; + extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass; + extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; + extern PFN_vkCmdFillBuffer vkCmdFillBuffer; + extern PFN_vkCmdNextSubpass vkCmdNextSubpass; + extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; + extern PFN_vkCmdPushConstants vkCmdPushConstants; + extern PFN_vkCmdResetEvent vkCmdResetEvent; + extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; + extern PFN_vkCmdResolveImage vkCmdResolveImage; + extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; + extern PFN_vkCmdSetDepthBias vkCmdSetDepthBias; + extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; + extern PFN_vkCmdSetEvent vkCmdSetEvent; + extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth; + extern PFN_vkCmdSetScissor vkCmdSetScissor; + extern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; + extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference; + extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; + extern PFN_vkCmdSetViewport vkCmdSetViewport; + extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; + extern PFN_vkCmdWaitEvents vkCmdWaitEvents; + extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; + extern PFN_vkCreateBuffer vkCreateBuffer; + extern PFN_vkCreateBufferView vkCreateBufferView; + extern PFN_vkCreateCommandPool vkCreateCommandPool; + extern PFN_vkCreateComputePipelines vkCreateComputePipelines; + extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; + extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; + extern PFN_vkCreateDevice vkCreateDevice; + extern PFN_vkCreateEvent vkCreateEvent; + extern PFN_vkCreateFence vkCreateFence; + extern PFN_vkCreateFramebuffer vkCreateFramebuffer; + extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; + extern PFN_vkCreateImage vkCreateImage; + extern PFN_vkCreateImageView vkCreateImageView; + extern PFN_vkCreateInstance vkCreateInstance; + extern PFN_vkCreatePipelineCache vkCreatePipelineCache; + extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; + extern PFN_vkCreateQueryPool vkCreateQueryPool; + extern PFN_vkCreateRenderPass vkCreateRenderPass; + extern PFN_vkCreateSampler vkCreateSampler; + extern PFN_vkCreateSemaphore vkCreateSemaphore; + extern PFN_vkCreateShaderModule vkCreateShaderModule; + extern PFN_vkDestroyBuffer vkDestroyBuffer; + 
extern PFN_vkDestroyBufferView vkDestroyBufferView; + extern PFN_vkDestroyCommandPool vkDestroyCommandPool; + extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; + extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; + extern PFN_vkDestroyDevice vkDestroyDevice; + extern PFN_vkDestroyEvent vkDestroyEvent; + extern PFN_vkDestroyFence vkDestroyFence; + extern PFN_vkDestroyFramebuffer vkDestroyFramebuffer; + extern PFN_vkDestroyImage vkDestroyImage; + extern PFN_vkDestroyImageView vkDestroyImageView; + extern PFN_vkDestroyInstance vkDestroyInstance; + extern PFN_vkDestroyPipeline vkDestroyPipeline; + extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; + extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; + extern PFN_vkDestroyQueryPool vkDestroyQueryPool; + extern PFN_vkDestroyRenderPass vkDestroyRenderPass; + extern PFN_vkDestroySampler vkDestroySampler; + extern PFN_vkDestroySemaphore vkDestroySemaphore; + extern PFN_vkDestroyShaderModule vkDestroyShaderModule; + extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; + extern PFN_vkEndCommandBuffer vkEndCommandBuffer; + extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; + extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; + extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; + extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; + extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; + extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; + extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; + extern PFN_vkFreeMemory vkFreeMemory; + extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; + extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + extern PFN_vkGetDeviceQueue vkGetDeviceQueue; + extern PFN_vkGetEventStatus vkGetEventStatus; + extern PFN_vkGetFenceStatus vkGetFenceStatus; + extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; + extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; + extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; + extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; + extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; + extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; + extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; + extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; + extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; + extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; + extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + extern PFN_vkMapMemory vkMapMemory; + extern PFN_vkMergePipelineCaches vkMergePipelineCaches; + extern PFN_vkQueueBindSparse vkQueueBindSparse; + extern PFN_vkQueueSubmit vkQueueSubmit; + extern PFN_vkQueueWaitIdle vkQueueWaitIdle; + extern PFN_vkResetCommandBuffer vkResetCommandBuffer; + extern 
PFN_vkResetCommandPool vkResetCommandPool; + extern PFN_vkResetDescriptorPool vkResetDescriptorPool; + extern PFN_vkResetEvent vkResetEvent; + extern PFN_vkResetFences vkResetFences; + extern PFN_vkSetEvent vkSetEvent; + extern PFN_vkUnmapMemory vkUnmapMemory; + extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; + extern PFN_vkWaitForFences vkWaitForFences; +#endif /* defined(VK_VERSION_1_0) */ +#if defined(VK_KHR_swapchain) + extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; + extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; + extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; + extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; + extern PFN_vkQueuePresentKHR vkQueuePresentKHR; +#endif /* defined(VK_KHR_swapchain) */ +#if defined(VK_KHR_surface) + extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; + extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; + extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; + extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; + extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; +#endif /* defined(VK_KHR_surface) */ + +#endif diff --git a/runtime/Includes/Utils/Ansi.h b/runtime/Includes/Utils/Ansi.h index 42eedeb..a299d74 100644 --- a/runtime/Includes/Utils/Ansi.h +++ b/runtime/Includes/Utils/Ansi.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Ansi.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:22:41 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:23:34 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_ANSI__ #define __MLX_ANSI__ diff --git a/runtime/Includes/Utils/Buffer.h b/runtime/Includes/Utils/Buffer.h new file mode 100644 index 0000000..fe95d25 --- /dev/null +++ b/runtime/Includes/Utils/Buffer.h @@ -0,0 +1,41 @@ +#ifndef __MLX_CPU_BUFFER__ +#define __MLX_CPU_BUFFER__ + +namespace mlx +{ + class CPUBuffer + { + public: + CPUBuffer() {} + CPUBuffer(std::size_t size) try : m_data(new std::uint8_t[size]), m_size(size) + {} + catch(...) 
+ { + FatalError("memory allocation for a CPU buffer failed"); + } + + [[nodiscard]] inline CPUBuffer Duplicate() const + { + CPUBuffer buffer(m_size); + std::memcpy(buffer.GetData(), m_data.get(), m_size); + return buffer; + } + + inline bool Empty() const { return m_size == 0; } + + [[nodiscard]] inline std::size_t GetSize() const noexcept { return m_size; } + + template<typename T> + [[nodiscard]] inline T* GetDataAs() const { return reinterpret_cast<T*>(m_data.get()); } + [[nodiscard]] inline std::uint8_t* GetData() const { return m_data.get(); } + inline operator bool() const { return (bool)m_data; } + + ~CPUBuffer() = default; + + private: + std::shared_ptr<std::uint8_t[]> m_data; + std::size_t m_size = 0; + }; +} + +#endif
diff --git a/runtime/Includes/Utils/CombineHash.h b/runtime/Includes/Utils/CombineHash.h index a11a35c..b334300 100644 --- a/runtime/Includes/Utils/CombineHash.h +++ b/runtime/Includes/Utils/CombineHash.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CombineHash.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/14 16:16:06 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:59:30 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_HASH__ #define __MLX_HASH__
diff --git a/runtime/Includes/Utils/ConstMap.h b/runtime/Includes/Utils/ConstMap.h index a4e8c40..74666ae 100644 --- a/runtime/Includes/Utils/ConstMap.h +++ b/runtime/Includes/Utils/ConstMap.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* ConstMap.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 15:26:39 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:59:35 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_CONST_MAP__ #define __MLX_CONST_MAP__
diff --git a/runtime/Includes/Utils/NonCopyable.h b/runtime/Includes/Utils/NonCopyable.h index abc402a..aa9c806 100644 --- a/runtime/Includes/Utils/NonCopyable.h +++ b/runtime/Includes/Utils/NonCopyable.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* non_copyable.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:20:13 by maldavid #+# #+# */ -/* Updated: 2024/03/24 14:42:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_NON_COPYABLE__ #define __MLX_NON_COPYABLE__
diff --git a/runtime/Includes/Utils/NonOwningPtr.h b/runtime/Includes/Utils/NonOwningPtr.h index 2acc906..8ece694 100644 --- a/runtime/Includes/Utils/NonOwningPtr.h +++ b/runtime/Includes/Utils/NonOwningPtr.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* NonOwningPtr.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 21:03:37 by maldavid #+# #+# */ -/* Updated: 2024/04/21 20:21:56 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_NON_OWNING_PTR__ #define __MLX_NON_OWNING_PTR__
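A minimal usage sketch for the CPUBuffer helper added above (DoSomething is a placeholder, not part of this patch):

	mlx::CPUBuffer pixels(4 * sizeof(std::uint32_t));          // shared, size-tracked byte buffer
	std::uint32_t* rgba = pixels.GetDataAs<std::uint32_t>();   // typed view over the same bytes
	rgba[0] = 0xFFFFFFFF;
	mlx::CPUBuffer copy = pixels.Duplicate();                  // deep copy; plain copies share storage
	if(copy && !copy.Empty())                                  // operator bool / Empty() sanity checks
		DoSomething(copy.GetData(), copy.GetSize());

Note that copy construction only shares the underlying std::shared_ptr storage; Duplicate() is the one deep copy.

diff --git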
a/runtime/Includes/Utils/NonOwningPtr.inl b/runtime/Includes/Utils/NonOwningPtr.inl index b36a337..b35bee5 100644 --- a/runtime/Includes/Utils/NonOwningPtr.inl +++ b/runtime/Includes/Utils/NonOwningPtr.inl @@ -1,15 +1,4 @@ -/* **************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* NonOwningPtr.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 21:04:00 by maldavid #+# #+# */ -/* Updated: 2024/03/27 21:04:00 by maldavid ### ########.fr */ -/* */ -/* **************************************************************************** */ - +#pragma once #include namespace mlx diff --git a/runtime/Includes/Utils/Singleton.h b/runtime/Includes/Utils/Singleton.h index 33cc136..e5e76ad 100644 --- a/runtime/Includes/Utils/Singleton.h +++ b/runtime/Includes/Utils/Singleton.h @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Singleton.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:18:46 by maldavid #+# #+# */ -/* Updated: 2024/03/27 18:20:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __MLX_SINGLETON__ #define __MLX_SINGLETON__ diff --git a/runtime/Sources/Core/Application.cpp b/runtime/Sources/Core/Application.cpp index a097b9c..3becaf4 100644 --- a/runtime/Sources/Core/Application.cpp +++ b/runtime/Sources/Core/Application.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Application.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 22:10:52 by maldavid #+# #+# */ -/* Updated: 2024/05/25 16:06:57 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Bridge.cpp b/runtime/Sources/Core/Bridge.cpp index 6a550bc..ca0ecef 100644 --- a/runtime/Sources/Core/Bridge.cpp +++ b/runtime/Sources/Core/Bridge.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Bridge.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:35:20 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:44:27 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/EventBus.cpp b/runtime/Sources/Core/EventBus.cpp index 550882b..721796a 100644 --- a/runtime/Sources/Core/EventBus.cpp +++ b/runtime/Sources/Core/EventBus.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* EventBus.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:36:05 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:37:01 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include #include diff --git a/runtime/Sources/Core/EventListener.cpp b/runtime/Sources/Core/EventListener.cpp index cb979e1..21540f5 100644 --- a/runtime/Sources/Core/EventListener.cpp +++ 
b/runtime/Sources/Core/EventListener.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* EventListener.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:37:09 by maldavid #+# #+# */ -/* Updated: 2024/03/27 17:37:38 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Fps.cpp b/runtime/Sources/Core/Fps.cpp index 59a0926..a464f67 100644 --- a/runtime/Sources/Core/Fps.cpp +++ b/runtime/Sources/Core/Fps.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Fps.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 14:56:17 by maldavid #+# #+# */ -/* Updated: 2024/03/27 20:53:11 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Graphics.cpp b/runtime/Sources/Core/Graphics.cpp index b9ea623..52b4419 100644 --- a/runtime/Sources/Core/Graphics.cpp +++ b/runtime/Sources/Core/Graphics.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Graphics.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 15:13:55 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:03:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Logs.cpp b/runtime/Sources/Core/Logs.cpp index b70ca49..ed15367 100644 --- a/runtime/Sources/Core/Logs.cpp +++ b/runtime/Sources/Core/Logs.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Logs.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/03/27 17:20:55 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:31:02 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Memory.cpp b/runtime/Sources/Core/Memory.cpp index 670061b..36f7689 100644 --- a/runtime/Sources/Core/Memory.cpp +++ b/runtime/Sources/Core/Memory.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Memory.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/07 16:32:01 by kbz_8 #+# #+# */ -/* Updated: 2024/04/23 14:05:52 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include diff --git a/runtime/Sources/Core/Profiler.cpp b/runtime/Sources/Core/Profiler.cpp index e811bc5..886668a 100644 --- a/runtime/Sources/Core/Profiler.cpp +++ b/runtime/Sources/Core/Profiler.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Profiler.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/10 13:56:21 by maldavid #+# #+# */ -/* 
Updated: 2024/04/23 14:08:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include
diff --git a/runtime/Sources/Core/SDLManager.cpp b/runtime/Sources/Core/SDLManager.cpp index c0f27b9..c4ce300 100644 --- a/runtime/Sources/Core/SDLManager.cpp +++ b/runtime/Sources/Core/SDLManager.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* SDLManager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/05/25 15:44:03 by maldavid #+# #+# */ -/* Updated: 2024/05/25 16:46:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include #include
@@ -52,14 +40,76 @@ namespace mlx if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS | SDL_INIT_TIMER) != 0) FatalError("SDL : unable to init all subsystems; %", SDL_GetError()); + + struct WatcherData + { + func::function callback; + NonOwningPtr manager; + void* userdata; + }; + + WatcherData* watcher_data = new WatcherData; // heap-allocated: the event watch outlives Init(), so its payload must too + watcher_data->callback = f_callback; + watcher_data->userdata = p_callback_data; + + SDL_AddEventWatch([](void* userdata, SDL_Event* event) -> int + { + WatcherData* data = static_cast<WatcherData*>(userdata); + + if(event->type == SDL_MOUSEMOTION) + { + } + + std::uint32_t id = event->window.windowID; + if(events_hooks.find(id) == events_hooks.end()) + return 0; // not one of our windows ('continue' is invalid in a lambda body) + switch(event->type) + { + case SDL_KEYUP: data->callback(MLX_KEYUP, event->key.keysym.scancode, data->userdata); break; + case SDL_KEYDOWN: data->callback(MLX_KEYDOWN, event->key.keysym.scancode, data->userdata); break; + case SDL_MOUSEBUTTONUP: data->callback(MLX_MOUSEUP, event->button.button, data->userdata); break; + case SDL_MOUSEBUTTONDOWN: data->callback(MLX_MOUSEDOWN, event->button.button, data->userdata); break; + case SDL_MOUSEWHEEL: + { + if(event->wheel.y > 0) // scroll up + data->callback(MLX_MOUSEWHEEL, 1, data->userdata); + else if(event->wheel.y < 0) // scroll down + data->callback(MLX_MOUSEWHEEL, 2, data->userdata); + if(event->wheel.x > 0) // scroll right + data->callback(MLX_MOUSEWHEEL, 3, data->userdata); + else if(event->wheel.x < 0) // scroll left + data->callback(MLX_MOUSEWHEEL, 4, data->userdata); + break; + } + case SDL_WINDOWEVENT: + { + switch(event->window.event) + { + case SDL_WINDOWEVENT_CLOSE: data->callback(MLX_WINDOW_EVENT, 0, data->userdata); break; + case SDL_WINDOWEVENT_MOVED: data->callback(MLX_WINDOW_EVENT, 1, data->userdata); break; + case SDL_WINDOWEVENT_MINIMIZED: data->callback(MLX_WINDOW_EVENT, 2, data->userdata); break; + case SDL_WINDOWEVENT_MAXIMIZED: data->callback(MLX_WINDOW_EVENT, 3, data->userdata); break; + case SDL_WINDOWEVENT_ENTER: data->callback(MLX_WINDOW_EVENT, 4, data->userdata); break; + case SDL_WINDOWEVENT_FOCUS_GAINED: data->callback(MLX_WINDOW_EVENT, 5, data->userdata); break; + case SDL_WINDOWEVENT_LEAVE: data->callback(MLX_WINDOW_EVENT, 6, data->userdata); break; + case SDL_WINDOWEVENT_FOCUS_LOST: data->callback(MLX_WINDOW_EVENT, 7, data->userdata); break; + + default: break; + } + break; + } + + default: break; + } + return 0; // the watcher's return value is ignored by SDL2, but the lambda is declared to return int + }, watcher_data); }
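+ // Recap of the integer codes forwarded to the user callback registered above:
+ // MLX_MOUSEWHEEL: 1 = up, 2 = down, 3 = right, 4 = left;
+ // MLX_WINDOW_EVENT: 0 = close, 1 = moved, 2 = minimized, 3 = maximized,
+ // 4 = mouse enter, 5 = focus gained, 6 = mouse leave, 7 = focus lost.
+ // A client-side hook could thus look like this (illustrative sketch only, not part of the public API):
+ // void OnEvent(int event, int code, void* param) { if(event == MLX_WINDOW_EVENT && code == 0) RequestQuit(param); }
+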
- void* SDLManager::CreateWindow(const std::string& title, std::size_t w, std::size_t h) + void* SDLManager::CreateWindow(const std::string& title, std::size_t w, std::size_t h, bool hidden) { details::WindowInfos* infos = new details::WindowInfos; Verify(infos != nullptr, "SDL : window allocation failed"); - infos->window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, w, h, SDL_WINDOW_VULKAN | SDL_WINDOW_SHOWN); + infos->window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, w, h, SDL_WINDOW_VULKAN | (hidden ? SDL_WINDOW_HIDDEN : SDL_WINDOW_SHOWN)); if(!infos->window) FatalError("SDL : unable to open a new window; %", SDL_GetError()); infos->icon = SDL_CreateRGBSurfaceFrom(static_cast(logo_mlx), logo_mlx_width, logo_mlx_height, 32, 4 * logo_mlx_width, rmask, gmask, bmask, amask);
@@ -84,6 +134,34 @@ namespace mlx delete infos; } + VkSurfaceKHR SDLManager::CreateVulkanSurface(Handle window, VkInstance instance) const noexcept + { + VkSurfaceKHR surface; + if(!SDL_Vulkan_CreateSurface(static_cast<SDL_Window*>(window), instance, &surface)) + FatalError("SDL : could not create a Vulkan surface; %", SDL_GetError()); + return surface; + } + + std::vector<const char*> SDLManager::GetRequiredVulkanInstanceExtentions(Handle window) const noexcept + { + std::uint32_t count; + if(!SDL_Vulkan_GetInstanceExtensions(static_cast<SDL_Window*>(window), &count, nullptr)) + FatalError("Vulkan : cannot get instance extensions from window : %", SDL_GetError()); + + std::vector<const char*> extensions(count); + + if(!SDL_Vulkan_GetInstanceExtensions(static_cast<SDL_Window*>(window), &count, extensions.data())) + FatalError("Vulkan : cannot get instance extensions from window : %", SDL_GetError()); + return extensions; + } + + Vec2ui SDLManager::GetVulkanDrawableSize(Handle window) const noexcept + { + int w, h; // SDL reports the drawable size as signed ints; converted to Vec2ui below + SDL_Vulkan_GetDrawableSize(static_cast<SDL_Window*>(window), &w, &h); + return Vec2ui(w, h); + } + void SDLManager::Shutdown() noexcept { if(m_drop_sdl_responsability)
diff --git a/runtime/Sources/Core/UUID.cpp b/runtime/Sources/Core/UUID.cpp index 96ae62f..f2da9c2 100644 --- a/runtime/Sources/Core/UUID.cpp +++ b/runtime/Sources/Core/UUID.cpp @@ -1,18 +1,6 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UUID.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/06 11:26:37 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:09:35 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include -#include +#include namespace mlx {
diff --git a/runtime/Sources/Graphics/Mesh.cpp b/runtime/Sources/Graphics/Mesh.cpp new file mode 100644 index 0000000..3f8221f --- /dev/null +++ b/runtime/Sources/Graphics/Mesh.cpp @@ -0,0 +1,31 @@ +#include +#include +#include + +namespace mlx +{ + void Mesh::Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn) const noexcept + { + for(std::size_t i = 0; i < m_sub_meshes.size(); i++) + Draw(cmd, drawcalls, polygondrawn, i); + } + + void Mesh::Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn, std::size_t submesh_index) const noexcept + { + Verify(submesh_index < m_sub_meshes.size(), "invalid submesh index"); + m_sub_meshes[submesh_index].vbo.Bind(cmd); + m_sub_meshes[submesh_index].ibo.Bind(cmd); + vkCmdDrawIndexed(cmd, static_cast<std::uint32_t>(m_sub_meshes[submesh_index].ibo.GetSize() / sizeof(std::uint32_t)), 1, 0, 0, 0); + polygondrawn += m_sub_meshes[submesh_index].triangle_count; + drawcalls++; + } + + Mesh::~Mesh() + { + for(auto& mesh : m_sub_meshes) + { + mesh.vbo.Destroy(); + mesh.ibo.Destroy(); + } + } +}
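For context, a minimal sketch of how a caller might drive the new Mesh API (the `cmd` command buffer and `mesh` pointer are assumed to come from the render-pass code elsewhere in this patch):

	std::size_t drawcalls = 0;
	std::size_t polygons = 0;
	mesh->Draw(cmd, drawcalls, polygons);     // records every submesh: bind vbo/ibo, then vkCmdDrawIndexed
	mesh->Draw(cmd, drawcalls, polygons, 0);  // or record a single submesh by index
	// drawcalls/polygons accumulate across calls, e.g. for the profiler or an FPS overlay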
diff --git a/runtime/Sources/Graphics/Scene.cpp b/runtime/Sources/Graphics/Scene.cpp new file mode 100644 index 0000000..547e641 --- /dev/null +++ b/runtime/Sources/Graphics/Scene.cpp @@ -0,0 +1,19 @@ +#include +#include +#include +#include + +namespace mlx +{ + Scene::Scene(SceneDescriptor desc) + : m_descriptor(std::move(desc)) + { + } + + Sprite& Scene::CreateSprite(std::shared_ptr<Texture> texture) noexcept + { + std::shared_ptr<Sprite> sprite = std::make_shared<Sprite>(texture); + m_sprites.push_back(sprite); + return *sprite; + } +}
diff --git a/runtime/Sources/Graphics/Sprite.cpp b/runtime/Sources/Graphics/Sprite.cpp new file mode 100644 index 0000000..8a65924 --- /dev/null +++ b/runtime/Sources/Graphics/Sprite.cpp @@ -0,0 +1,44 @@ +#include +#include +#include +#include + +namespace mlx +{ + std::shared_ptr<Mesh> CreateQuad(float x, float y, float width, float height) + { + std::vector<Vertex> data(4); + + data[0].position = Vec4f(x, y, 0.0f, 1.0f); + data[0].uv = Vec2f(1.0f, 1.0f); + + data[1].position = Vec4f(x + width, y, 0.0f, 1.0f); + data[1].uv = Vec2f(0.0f, 1.0f); + + data[2].position = Vec4f(x + width, y + height, 0.0f, 1.0f); + data[2].uv = Vec2f(0.0f, 0.0f); + + data[3].position = Vec4f(x, y + height, 0.0f, 1.0f); + data[3].uv = Vec2f(1.0f, 0.0f); + + std::vector<std::uint32_t> indices = { + 0, + 1, + 2, + 2, + 3, + 0, + }; + + std::shared_ptr<Mesh> mesh = std::make_shared<Mesh>(); + mesh->AddSubMesh({ std::move(data), std::move(indices) }); + return mesh; + } + + Sprite::Sprite(std::shared_ptr<Texture> texture) + { + Verify((bool)texture, "Sprite: invalid texture"); + p_mesh = CreateQuad(0, 0, texture->GetWidth(), texture->GetHeight()); + p_texture = texture; + } +}
diff --git a/runtime/Sources/Platform/Inputs.cpp b/runtime/Sources/Platform/Inputs.cpp index 2cc6ca6..770c59b 100644 --- a/runtime/Sources/Platform/Inputs.cpp +++ b/runtime/Sources/Platform/Inputs.cpp @@ -1,20 +1,6 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* inputs.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/05 16:30:19 by maldavid #+# #+# */ -/* Updated: 2024/03/27 15:50:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ +#include -#include - -#include "inputs.h" -#include -#include +#include namespace mlx {
diff --git a/runtime/Sources/Platform/Window.cpp b/runtime/Sources/Platform/Window.cpp index 66ae725..bc15aec 100644 --- a/runtime/Sources/Platform/Window.cpp +++ b/runtime/Sources/Platform/Window.cpp @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Window.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:36:44 by maldavid #+# #+# */ -/* Updated: 2024/07/05 13:12:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include
@@ -17,10 +5,10 @@ namespace mlx { - Window::Window(std::size_t w, std::size_t h, const std::string& title) : m_width(w), m_height(h) + Window::Window(std::size_t w, std::size_t h, const std::string& title, bool hidden) : m_width(w), m_height(h) { static std::uint64_t ids = 0; - p_window = SDLManager::Get().CreateWindow(title, w, h); + p_window = SDLManager::Get().CreateWindow(title, w, h, hidden); m_id = ids++; }
diff --git a/runtime/Sources/Renderer/Buffer.cpp b/runtime/Sources/Renderer/Buffer.cpp new file mode 100644 index 0000000..f6feb3a --- /dev/null +++ b/runtime/Sources/Renderer/Buffer.cpp @@ -0,0 +1,175 @@ +#include +#include +#include + +namespace mlx +{
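+ // BufferType semantics, as implemented below:
+ // - Constant: immutable, requires initial data, staged then pushed to GPU-local memory;
+ // - LowDynamic: host-written at creation, then pushed to GPU-local memory;
+ // - HighDynamic: stays host-visible and persistently mapped (used by UniformBuffer);
+ // - Staging: transient transfer source for uploads.
+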
+ void GPUBuffer::Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data) + { + VmaAllocationCreateInfo alloc_info{}; + alloc_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + alloc_info.usage = VMA_MEMORY_USAGE_AUTO; + + if(type == BufferType::Constant) + { + if(data.Empty()) + { + Warning("Vulkan : trying to create constant buffer without data (constant buffers cannot be modified after creation)"); + return; + } + m_usage = usage | VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + } + else if(type == BufferType::HighDynamic) + m_usage = usage; + else // LowDynamic or Staging + m_usage = usage | VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + if(type == BufferType::Staging && data.Empty()) + Warning("Vulkan : trying to create staging buffer without data (wtf?)"); + + CreateBuffer(size, m_usage, alloc_info); + + if(!data.Empty()) + { + if(p_map != nullptr) + std::memcpy(p_map, data.GetData(), data.GetSize()); + } + if(type == BufferType::Constant || type == BufferType::LowDynamic) + PushToGPU(); + } + + void GPUBuffer::CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VmaAllocationCreateInfo alloc_info) + { + VkBufferCreateInfo bufferInfo{}; + bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + bufferInfo.size = size; + bufferInfo.usage = usage; + bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + + m_allocation = RenderCore::Get().GetAllocator().CreateBuffer(&bufferInfo, &alloc_info, m_buffer, nullptr); + if(alloc_info.flags != 0) // host-access flag requested: keep a persistent mapping + RenderCore::Get().GetAllocator().MapMemory(m_allocation, &p_map); + } + + bool GPUBuffer::CopyFrom(const GPUBuffer& buffer) noexcept + { + if(!(m_usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT)) + { + Error("Vulkan : buffer cannot be the destination of a copy because it does not have the correct usage flag"); + return false; + } + if(!(buffer.m_usage & VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) + { + Error("Vulkan : buffer cannot be the source of a copy because it does not have the correct usage flag"); + return false; + } + + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + kvfCopyBufferToBuffer(cmd, m_buffer, buffer.Get(), m_memory.size); + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfWaitForFence(RenderCore::Get().GetDevice(), fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + return true; + } + + void GPUBuffer::PushToGPU() noexcept + { + VmaAllocationCreateInfo alloc_info{}; + alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + + GPUBuffer new_buffer; + new_buffer.m_usage = (this->m_usage & 0xFFFFFFFC) | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // clear the transfer bits, then mark the GPU copy as a transfer destination + new_buffer.CreateBuffer(m_memory.size, new_buffer.m_usage, alloc_info); + + if(new_buffer.CopyFrom(*this)) // on success the handles are swapped, so Destroy() below releases the old host-side buffer + Swap(new_buffer); + new_buffer.Destroy(); + DebugLog("Vulkan : pushed buffer to GPU memory"); + } + + void GPUBuffer::Destroy() noexcept + { + if(m_buffer == VK_NULL_HANDLE) + return; + if(p_map != nullptr) // only unmap what was actually mapped at creation + RenderCore::Get().GetAllocator().UnmapMemory(m_allocation); + p_map = nullptr; + RenderCore::Get().GetAllocator().DestroyBuffer(m_allocation, m_buffer); + m_buffer = VK_NULL_HANDLE; + } + + void GPUBuffer::Swap(GPUBuffer& buffer) noexcept + { + std::swap(m_buffer, buffer.m_buffer); + std::swap(m_allocation, buffer.m_allocation); + std::swap(m_size, buffer.m_size); + std::swap(m_offset, buffer.m_offset); + std::swap(p_map, buffer.p_map); +
std::swap(m_usage, buffer.m_usage); + } + + void VertexBuffer::SetData(CPUBuffer data) + { + if(data.GetSize() > m_memory.size) + { + Error("Vulkan : trying to store too much data in a vertex buffer (% bytes in % bytes)", data.GetSize(), m_memory.size); + return; + } + if(data.Empty()) + { + Warning("Vulkan : cannot set empty data in a vertex buffer"); + return; + } + GPUBuffer staging; + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, data); + CopyFrom(staging); + staging.Destroy(); + } + + void IndexBuffer::SetData(CPUBuffer data) + { + if(data.GetSize() > m_memory.size) + { + Error("Vulkan : trying to store too much data in an index buffer (% bytes in % bytes)", data.GetSize(), m_memory.size); + return; + } + if(data.Empty()) + { + Warning("Vulkan : cannot set empty data in an index buffer"); + return; + } + GPUBuffer staging; + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_INDEX_BUFFER_BIT, data); + CopyFrom(staging); + staging.Destroy(); + } + + void UniformBuffer::Init(std::uint32_t size) + { + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + m_buffers[i].Init(BufferType::HighDynamic, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, {}); + m_maps[i] = m_buffers[i].GetMap(); + if(m_maps[i] == nullptr) + FatalError("Vulkan : unable to map a uniform buffer"); + } + } + + void UniformBuffer::SetData(CPUBuffer data, std::size_t frame_index) + { + if(data.GetSize() != m_buffers[frame_index].GetSize()) + { + Error("Vulkan : invalid data size to update a uniform buffer, % != %", data.GetSize(), m_buffers[frame_index].GetSize()); + return; + } + if(m_maps[frame_index] != nullptr) + std::memcpy(m_maps[frame_index], data.GetData(), data.GetSize()); + } + + void UniformBuffer::Destroy() noexcept + { + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + m_buffers[i].Destroy(); + } +}
diff --git a/runtime/Sources/Renderer/Buffers/Buffer.cpp b/runtime/Sources/Renderer/Buffers/Buffer.cpp deleted file mode 100644 index 6f6455f..0000000 --- a/runtime/Sources/Renderer/Buffers/Buffer.cpp +++ /dev/null @@ -1,150 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Buffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:55:57 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:20:13 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include -#include - -namespace mlx -{ - void Buffer::Create(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, const char* name, const void* data) - { - MLX_PROFILE_FUNCTION(); - m_usage = usage; - if(type == BufferType::Constant || type == BufferType::LowDynamic) - { - if(data == nullptr && type == BufferType::Constant) - { - Warning("Vulkan : trying to create constant buffer without data (constant buffers cannot be modified after creation)"); - return; - } - m_usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT; - } - - VmaAllocationCreateInfo alloc_info{}; - alloc_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; - alloc_info.usage = VMA_MEMORY_USAGE_AUTO; - - CreateBuffer(m_usage, alloc_info, size, name); - - if(data != nullptr) - { - void* mapped = nullptr; - MapMem(&mapped); - std::memcpy(mapped, data, size); - UnmapMem(); - if(type == BufferType::constant || type == BufferType::LowDynamic) - PushToGPU(); - } - } - - void Buffer::Destroy() noexcept - {
MLX_PROFILE_FUNCTION(); - if(m_is_mapped) - UnmapMem(); - if(m_buffer != VK_NULL_HANDLE) - RenderCore::Get().GetAllocator().DestroyBuffer(m_allocation, m_buffer); - m_buffer = VK_NULL_HANDLE; - } - - void Buffer::CreateBuffer(VkBufferUsageFlags usage, VmaAllocationCreateInfo info, VkDeviceSize size, [[maybe_unused]] const char* name) - { - MLX_PROFILE_FUNCTION(); - VkBufferCreateInfo bufferInfo{}; - bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; - bufferInfo.size = size; - bufferInfo.usage = usage; - bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; - - #ifdef DEBUG - m_name = name; - std::string alloc_name = m_name; - if(usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) - alloc_name.append("_index_buffer"); - else if(usage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) - alloc_name.append("_vertex_buffer"); - else if(!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) - alloc_name.append("_buffer"); - m_allocation = RenderCore::Get().GetAllocator().CreateBuffer(&bufferInfo, &info, m_buffer, alloc_name.c_str()); - #else - m_allocation = RenderCore::Get().GetAllocator().CreateBuffer(&bufferInfo, &info, m_buffer, nullptr); - #endif - m_size = size; - } - - bool Buffer::CopyFromBuffer(const Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!(m_usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT)) - { - Error("Vulkan : buffer cannot be the destination of a copy because it does not have the correct usage flag"); - return false; - } - if(!(buffer.m_usage & VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) - { - Error("Vulkan : buffer cannot be the source of a copy because it does not have the correct usage flag"); - return false; - } - - CmdBuffer& cmd = RenderCore::Get().GetSingleTimeCmdBuffer(); - cmd.BeginRecord(); - - cmd.CopyBuffer(*this, const_cast(buffer)); - - cmd.EndRecord(); - cmd.SubmitIdle(); - - return true; - } - - void Buffer::PushToGPU() noexcept - { - MLX_PROFILE_FUNCTION(); - VmaAllocationCreateInfo alloc_info{}; - alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; - - Buffer new_buffer; - new_buffer.m_usage = (m_usage & 0xFFFFFFFC) | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - #ifdef DEBUG - std::string new_name = m_name + "_GPU"; - new_buffer.CreateBuffer(new_buffer.m_usage, alloc_info, m_size, new_name.c_str()); - #else - new_buffer.CreateBuffer(new_buffer.m_usage, alloc_info, m_size, nullptr); - #endif - - if(new_buffer.CopyFromBuffer(*this)) // if the copy succeded we swap the buffers, otherwise the new one is deleted - this->Swap(new_buffer); - new_buffer.Destroy(); // destroying the old buffer as they have been swapped - } - - void Buffer::Swap(Buffer& buffer) noexcept - { - std::swap(m_buffer, buffer.m_buffer); - std::swap(m_allocation, buffer.m_allocation); - std::swap(m_size, buffer.m_size); - std::swap(m_offset, buffer.m_offset); - #ifdef DEBUG - std::swap(m_name, buffer.m_name); - #endif - std::swap(m_usage, buffer.m_usage); - std::swap(m_is_mapped, buffer.m_is_mapped); - } - - void Buffer::Flush(VkDeviceSize size, VkDeviceSize offset) - { - RenderCore::Get().GetAllocator().Flush(m_allocation, size, offset); - } -} diff --git a/runtime/Sources/Renderer/Buffers/UniformBuffer.cpp b/runtime/Sources/Renderer/Buffers/UniformBuffer.cpp deleted file mode 100644 index 0359c03..0000000 --- a/runtime/Sources/Renderer/Buffers/UniformBuffer.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UniformBuffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 
2022/10/06 18:45:52 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:25:17 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void UniformBuffer::create(NonOwningPtr renderer, std::uint32_t size, [[maybe_unused]] const char* name) - { - MLX_PROFILE_FUNCTION(); - p_renderer = renderer; - - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - { - #ifdef DEBUG - std::string name_frame = name; - name_frame.append(std::to_string(i)); - m_buffers[i].create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, name_frame.c_str()); - #else - _buffers[i].Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, nullptr); - #endif - m_buffers[i].MapMem(&_maps[i]); - if(m_maps[i] == nullptr) - FatalError("Vulkan : unable to map a uniform buffer"); - } - } - - void UniformBuffer::SetData(std::uint32_t size, const void* data) - { - MLX_PROFILE_FUNCTION(); - std::memcpy(m_maps[p_renderer->GetActiveImageIndex()], data, static_cast(size)); - } - - void UniformBuffer::SetDynamicData(std::uint32_t size, const void* data) - { - MLX_PROFILE_FUNCTION(); - std::memcpy(m_maps[p_renderer->GetActiveImageIndex()], data, static_cast(size)); - m_buffers[p_renderer->GetActiveImageIndex()].Flush(); - } - - unsigned int UniformBuffer::GetSize() noexcept - { - return m_buffers[p_renderer->GetActiveImageIndex()].GetSize(); - } - - unsigned int UniformBuffer::GetOffset() noexcept - { - return m_buffers[p_renderer->GetActiveImageIndex()].GetOffset(); - } - - VkBuffer& UniformBuffer::operator()() noexcept - { - return m_buffers[p_renderer->GetActiveImageIndex()].Get(); - } - - VkBuffer& UniformBuffer::Get() noexcept - { - return m_buffers[p_renderer->GetActiveImageIndex()].Get(); - } - - void UniformBuffer::Destroy() noexcept - { - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_buffers[i].Destroy(); - } -} diff --git a/runtime/Sources/Renderer/Buffers/VertexBuffer.cpp b/runtime/Sources/Renderer/Buffers/VertexBuffer.cpp deleted file mode 100644 index 41afa98..0000000 --- a/runtime/Sources/Renderer/Buffers/VertexBuffer.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* VertexBuffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:28:08 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:48:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -namespace mlx -{ - void RAMVertexBuffer::SetData(std::uint32_t size, const void* data) - { - if(size > GetSize()) - { - Error("Vulkan : trying to store to much data in a vertex buffer (% bytes in % bytes)", size, GetSize()); - return; - } - - if(data == nullptr) - Warning("Vulkan : mapping null data in a vertex buffer"); - - void* temp = nullptr; - MapMem(&temp); - std::memcpy(temp, data, static_cast(size)); - UnmapMem(); - } - - void DeviceVertexBuffer::SetData(std::uint32_t size, const void* data) - { - if(size > GetSize()) - { - Error("Vulkan : trying to store to much data in a vertex buffer (% bytes in % bytes)", size, GetSize()); - return; - } - - if(data == nullptr) - Warning("Vulkan : mapping null data in a vertex buffer"); - - Buffer tmp_buf; - #ifdef DEBUG - tmp_buf.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 
"tmp_buffer", data); - #else - tmp_buf.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, data); - #endif - CopyFromBuffer(tmp_buf); - tmp_buf.Destroy(); - } -} diff --git a/runtime/Sources/Renderer/Command/CommandBuffer.cpp b/runtime/Sources/Renderer/Command/CommandBuffer.cpp deleted file mode 100644 index 6574682..0000000 --- a/runtime/Sources/Renderer/Command/CommandBuffer.cpp +++ /dev/null @@ -1,365 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandBuffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:26:06 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:02:20 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - bool VectorPushBackIfNotFound(std::vector>& vector, NonOwningPtr res) - { - auto it = std::find_if(vector.begin(), vector.end(), [=](const NonOwningPtr vres) - { - return vres->GetUUID() == res->GetUUID(); - }); - - if(it == vector.end()) - { - vector.push_back(res); - return true; - } - return false; - } - - void CmommanBuffer::Init(CommandBufferType type, NonOwningPtr manager) - { - Init(type, &manager->GetCmdPool()); - } - - void CommandBuffer::Init(CommandBufferType type, NonOwningPtr pool) - { - MLX_PROFILE_FUNCTION(); - m_type = type; - m_pool = pool; - - VkCommandBufferAllocateInfo alloc_info{}; - alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; - alloc_info.commandPool = pool->get(); - alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; - alloc_info.commandBufferCount = 1; - - VkResult res = vkAllocateCommandBuffers(RenderCore::Get().getDevice().get(), &allocInfo, &_cmd_buffer); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to allocate command buffer, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - Message("Vulkan : created new command buffer"); - #endif - - m_fence.init(); - state = CommandBufferState::Idle; - } - - void CommandBuffer::BeginRecord(VkCommandBufferUsageFlags usage) - { - MLX_PROFILE_FUNCTION(); - if(!IsInit()) - FatalError("Vulkan : begenning record on un uninit command buffer"); - if(m_state == CommandBufferState::Recording) - return; - - VkCommandBufferBeginInfo begin_info{}; - begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; - begin_info.flags = usage; - if(vkBeginCommandBuffer(m_cmd_buffer, &begin_info) != VK_SUCCESS) - FatalError("Vulkan : failed to begin recording command buffer"); - - m_state = CommandBufferState::Recording; - } - - void CommandBuffer::BindVertexBuffer(Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to bind a vertex buffer to a non recording command buffer"); - return; - } - VkDeviceSize offset[] = { buffer.GetOffset() }; - vkCmdBindVertexBuffers(m_cmd_buffer, 0, 1, &buffer.Get(), offset); - - buffer.RecordedInCommandBuffer(); - VectorPushBackIfNotFound(m_cmd_resources, &buffer); - } - - void CommandBuffer::NindIndexBuffer(Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to bind a index buffer to a non recording command buffer"); - return; - } - vkCmdBindIndexBuffer(m_cmd_buffer, buffer.Get(), buffer.GetOffset(), VK_INDEX_TYPE_UINT16); - - buffer.RecordedInCommandBuffer(); - 
VectorPushBackIfNotFound(m_cmd_resources, &buffer); - } - - void CommandBuffer::CopyBuffer(Buffer& dst, Buffer& src) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to do a buffer to buffer copy in a non recording command buffer"); - return; - } - - PreTransferBarrier(); - - VkBufferCopy copy_region{}; - copy_region.size = src.GetSize(); - vkCmdCopyBuffer(m_cmd_buffer, src.Get(), dst.Get(), 1, ©_region); - - PostTransferBarrier(); - - dst.RecordedInCommandBuffer(); - src.RecordedInCommandBuffer(); - VectorPushBackIfNotFound(m_cmd_resources, &dst); - VectorPushBackIfNotFound(m_cmd_resources, &src); - } - - void CommandBuffer::CopyBufferToImage(Buffer& buffer, Image& image) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to do a buffer to image copy in a non recording command buffer"); - return; - } - - PreTransferBarrier(); - - VkBufferImageCopy region{}; - region.bufferOffset = 0; - region.bufferRowLength = 0; - region.bufferImageHeight = 0; - region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - region.imageSubresource.mipLevel = 0; - region.imageSubresource.baseArrayLayer = 0; - region.imageSubresource.layerCount = 1; - region.imageOffset = { 0, 0, 0 }; - region.imageExtent = { image.GetWidth(), image.GetHeight(), 1 }; - - vkCmdCopyBufferToImage(m_cmd_buffer, buffer.Get(), image.Get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); - - PostTransferBarrier(); - - image.RecordedInCommandBuffer(); - buffer.RecordedInCommandBuffer(); - VectorPushBackIfNotFound(m_cmd_resources, &image); - VectorPushBackIfNotFound(m_cmd_resources, &buffer); - } - - void CommandBuffer::CopyImagetoBuffer(Image& image, Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to do an image to buffer copy in a non recording command buffer"); - return; - } - - PreTransferBarrier(); - - VkBufferImageCopy region{}; - region.bufferOffset = 0; - region.bufferRowLength = 0; - region.bufferImageHeight = 0; - region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - region.imageSubresource.mipLevel = 0; - region.imageSubresource.baseArrayLayer = 0; - region.imageSubresource.layerCount = 1; - region.imageOffset = { 0, 0, 0 }; - region.imageExtent = { image.GetWidth(), image.GetHeight(), 1 }; - - vkCmdCopyImageToBuffer(m_cmd_buffer, image.Get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.Get(), 1, ®ion); - - PostTransferBarrier(); - - image.RecordedInCommandBuffer(); - buffer.RecordedInCommandBuffer(); - VectorPushBackIfNotFound(m_cmd_resources, &buffer); - VectorPushBackIfNotFound(m_cmd_resources, &image); - } - - void CommandBuffer::TransitionImageLayout(Image& image, VkImageLayout new_layout) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!IsRecording()) - { - Warning("Vulkan : trying to do an image layout transition in a non recording command buffer"); - return; - } - - VkImageMemoryBarrier barrier{}; - barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = image.GetLayout(); - barrier.newLayout = new_layout; - barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.image = image.Get(); - barrier.subresourceRange.aspectMask = IsDepthFormat(image.GetFormat()) ? 
VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT; - barrier.subresourceRange.baseMipLevel = 0; - barrier.subresourceRange.levelCount = 1; - barrier.subresourceRange.baseArrayLayer = 0; - barrier.subresourceRange.layerCount = 1; - barrier.srcAccessMask = LayoutToAccessMask(image.GetLayout(), false); - barrier.dstAccessMask = LayoutToAccessMask(new_layout, true); - if(IsStencilFormat(image.GetFormat())) - barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; - - VkPipelineStageFlags source_stage = 0; - if(barrier.oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) - source_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - else if(barrier.srcAccessMask != 0) - source_stage = AccessFlagsToPipelineStage(barrier.srcAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); - else - source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - - VkPipelineStageFlags destination_stage = 0; - if(barrier.newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) - destination_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - else if(barrier.dstAccessMask != 0) - destination_stage = AccessFlagsToPipelineStage(barrier.dstAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); - else - destination_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - - vkCmdPipelineBarrier(m_cmd_buffer, source_stage, destination_stage, 0, 0, nullptr, 0, nullptr, 1, &barrier); - - image.RecordedInCommandBuffer(); - VectorPushBackIfNotFound(m_cmd_resources, &image); - } - - void CommandBuffer::EndRecord() - { - MLX_PROFILE_FUNCTION(); - if(!IsInit()) - FatalError("Vulkan : ending record on un uninit command buffer"); - if(m_state != CommandBufferState::Recording) - return; - if(vkEndCommandBuffer(m_cmd_buffer) != VK_SUCCESS) - FatalError("Vulkan : failed to end recording command buffer"); - - m_state = CommandBufferState::Idle; - } - - void CommandBuffer::SubmitIdle(bool should_wait_for_execution) noexcept - { - MLX_PROFILE_FUNCTION(); - if(m_type != CommandBufferType::SingleTime) - { - Error("Vulkan : try to perform an idle submit on a command buffer that is not single-time, this is not allowed"); - return; - } - - m_fence.Reset(); - - VkSubmitInfo submit_info{}; - submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; - submit_info.commandBufferCount = 1; - submit_info.pCommandBuffers = &m_cmd_buffer; - - VkResult res = vkQueueSubmit(RenderCore::Get().GetQueue().GetGraphic(), 1, &submit_info, m_fence.Get()); - if(res != VK_SUCCESS) - FatalError("Vulkan error : failed to submit a single time command buffer, %", VerbaliseVkResult(res)); - m_state = CommandBufferState::Submitted; - - if(should_wait_for_execution) - WaitForExecution(); - } - - void CommandBuffer::Submit(NonOwningPtr signal, NonOwningPtr wait) noexcept - { - MLX_PROFILE_FUNCTION(); - std::array signal_semaphores; - std::array wait_semaphores; - - signal_semaphores[0] = (signal ? signal->Get() : VK_NULL_HANDLE); - - wait_semaphores[0] = (wait ? wait->Get() : VK_NULL_HANDLE); - VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT }; - - m_fence.Reset(); - - VkSubmitInfo submit_info{}; - submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; - submit_info.waitSemaphoreCount = (!wait ? 
0 : wait_semaphores.size()); - submit_info.pWaitSemaphores = wait_semaphores.data(); - submit_info.pWaitDstStageMask = wait_stages; - submit_info.commandBufferCount = 1; - submit_info.pCommandBuffers = &m_cmd_buffer; - submit_info.signalSemaphoreCount = (!signal ? 0 : signal_semaphores.size()); - submit_info.pSignalSemaphores = signal_semaphores.data(); - - VkResult res = vkQueueSubmit(RenderCore::Get().GetQueue().GetGraphic(), 1, &submit_info, m_fence.get()); - if(res != VK_SUCCESS) - FatalError("Vulkan error : failed to submit draw command buffer, %", VerbaliseVkResult(res)); - m_state = CommandBufferState::Submitted; - } - - void CommandBuffer::UpdateSubmitState() noexcept - { - MLX_PROFILE_FUNCTION(); - if(!m_fence.IsReady()) - return; - - for(NonOwningPtr res : m_cmd_resources) - { - if(res) - res->RemovedFromCommandBuffer(); - } - m_cmd_resources.clear(); - m_state = CommandBufferState::Ready; - } - - void CommandBuffer::PreTransferBarrier() noexcept - { - MLX_PROFILE_FUNCTION(); - VkMemoryBarrier memory_barrier{}; - memory_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; - memory_barrier.pNext = nullptr; - memory_barrier.srcAccessMask = 0U; - memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - - vkCmdPipelineBarrier(m_cmd_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &memory_barrier, 0, nullptr, 0, nullptr); - } - - void CommandBuffer::PostTransferBarrier() noexcept - { - MLX_PROFILE_FUNCTION(); - VkMemoryBarrier memory_barrier{}; - memory_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; - memory_barrier.pNext = nullptr; - memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT; - - vkCmdPipelineBarrier(m_cmd_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1, &memory_barrier, 0, nullptr, 0, nullptr); - } - - void CommandBuffer::Destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - m_fence.Destroy(); - m_cmd_buffer = VK_NULL_HANDLE; - m_state = CommandBufferState::Uninit; - #ifdef DEBUG - Message("Vulkan : destroyed command buffer"); - #endif - } -} diff --git a/runtime/Sources/Renderer/Command/CommandManager.cpp b/runtime/Sources/Renderer/Command/CommandManager.cpp deleted file mode 100644 index df6fab9..0000000 --- a/runtime/Sources/Renderer/Command/CommandManager.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandManager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:50:52 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:55:04 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -namespace mlx -{ - void CommandManager::Init() noexcept - { - m_cmd_pool.Init(); - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_cmd_buffers[i].Init(CommandBufferType::LongTime, this); - } - - void CommandManager::BeginRecord(int active_image_index) - { - m_cmd_buffers[active_image_index].BeginRecord(); - } - - void CommandManager::EndRecord(int active_image_index) - { - m_cmd_buffers[active_image_index].EndRecord(); - } - - void CommandManager::Destroy() noexcept - { - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_cmd_buffers[i].Destroy(); - m_cmd_pool.Destroy(); - } -} diff --git 
a/runtime/Sources/Renderer/Command/CommandPool.cpp b/runtime/Sources/Renderer/Command/CommandPool.cpp deleted file mode 100644 index af576a7..0000000 --- a/runtime/Sources/Renderer/Command/CommandPool.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* CommandPool.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:24:33 by maldavid #+# #+# */ -/* Updated: 2024/04/23 14:57:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void CommandPool::Init() - { - VkCommandPoolCreateInfo pool_info{}; - pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; - pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; - pool_info.queueFamilyIndex = RenderCore::Get().GetQueue().GetFamilies().graphics_family.value(); - - VkResult res = vkCreateCommandPool(RenderCore::Get().GetDevice().Get(), &pool_info, nullptr, &m_cmd_pool); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to create command pool, %", VerbaliseVkResult(res)); - } - - void CommandPool::Destroy() noexcept - { - vkDestroyCommandPool(RenderCore::Get().GetDevice().Get(), m_cmd_pool, nullptr); - m_cmd_pool = VK_NULL_HANDLE; - } -} diff --git a/runtime/Sources/Renderer/Command/SingleTimeCommandManager.cpp b/runtime/Sources/Renderer/Command/SingleTimeCommandManager.cpp deleted file mode 100644 index b9fdae4..0000000 --- a/runtime/Sources/Renderer/Command/SingleTimeCommandManager.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* SingleTimeCommandManager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/15 19:57:49 by maldavid #+# #+# */ -/* Updated: 2024/04/23 15:05:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void SingleTimeCmdManager::Init() noexcept - { - m_pool.init(); - for(int i = 0; i < BASE_POOL_SIZE; i++) - { - m_buffers.emplace_back(); - m_buffers.back().Init(CommandBufferType::SingleTime, &m_pool); - } - } - - CommandBuffer& SingleTimeCmdManager::GetCmdBuffer() noexcept - { - for(CmdBuffer& buf : m_buffers) - { - if(buf.IsReadyToBeUsed()) - { - buf.reset(); - return buf; - } - } - m_buffers.emplace_back().Init(CommandBufferType::SingleTime, &m_pool); - return m_buffers.back(); - } - - void SingleTimeCmdManager::UpdateSingleTimesCmdBuffersSubmitState() noexcept - { - for(CmdBuffer& cmd : m_buffers) - cmd.UpdateSubmitState(); - } - - void SingleTimeCmdManager::WaitForAllExecutions() noexcept - { - for(CmdBuffer& cmd : m_buffers) - cmd.WaitForExecution(); - } - - void SingleTimeCmdManager::Destroy() noexcept - { - std::for_each(m_buffers.begin(), m_buffers.end(), [](CommandBuffer& buf) - { - buf.Destroy(); - }); - m_pool.Destroy(); - } -} diff --git a/runtime/Sources/Renderer/Core/Device.cpp b/runtime/Sources/Renderer/Core/Device.cpp deleted file mode 100644 index d26e966..0000000 --- a/runtime/Sources/Renderer/Core/Device.cpp +++ /dev/null @@ -1,142 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Device.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ 
*/ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:14:29 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:10:08 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -namespace mlx -{ - const std::vector device_extensions = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; - - void Device::Init() - { - PickPhysicalDevice(); - - Queues::QueueFamilyIndices indices = RenderCore::Get().GetQueue().GetFamilies(); - - std::vector queue_create_infos; - std::set unique_queue_families = { indices.graphics_family.value(), indices.present_family.value() }; - - float queue_priority = 1.0f; - for(std::uint32_t queue_family : unique_queue_families) - { - VkDeviceQueueCreateInfo queue_create_info{}; - queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; - queue_create_info.queueFamilyIndex = queue_family; - queue_create_info.queueCount = 1; - queue_create_info.pQueuePriorities = &queue_priority; - queue_create_infos.push_back(queue_create_info); - } - - VkPhysicalDeviceFeatures device_features{}; - - VkDeviceCreateInfo create_info{}; - create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; - create_info.queueCreateInfoCount = static_cast(queue_create_infos.size()); - create_info.pQueueCreateInfos = queue_create_infos.data(); - create_info.pEnabledFeatures = &device_features; - create_info.enabledExtensionCount = static_cast(device_extensions.size()); - create_info.ppEnabledExtensionNames = device_extensions.data(); - create_info.enabledLayerCount = 0; - - VkResult res; - if((res = vkCreateDevice(m_physical_device, &create_info, nullptr, &m_device)) != VK_SUCCESS) - FatalError("Vulkan : failed to create logcal device, %", VerbaliseVkResult(res)); - DebugLog("Vulkan : created new logical device"); - } - - void Device::PickPhysicalDevice() - { - std::uint32_t device_count = 0; - vkEnumeratePhysicalDevices(RenderCore::Get().GetInstance().Get(), &device_count, nullptr); - - if(device_count == 0) - FatalError("Vulkan : failed to find GPUs with Vulkan support"); - - std::vector devices(device_count); - vkEnumeratePhysicalDevices(RenderCore::Get().GetInstance().Get(), &device_count, devices.data()); - - std::multimap devices_score; - - for(const auto& device : devices) - { - int score = DeviceScore(device); - devices_score.insert(std::make_pair(score, device)); - } - - if(devices_score.rbegin()->first > 0) - m_physical_device = devices_score.rbegin()->second; - else - FatalError("Vulkan : failed to find a suitable GPU"); - - #ifdef DEBUG - VkPhysicalDeviceProperties props; - vkGetPhysicalDeviceProperties(m_physical_device, &props); - DebugLog("Vulkan : picked a physical device, %s", props.deviceName); - #endif - RenderCore::Get().GetQueue().FindQueueFamilies(m_physical_device); // update queue indicies to current physical device - } - - int Device::DeviceScore(VkPhysicalDevice device) - { - Queues::QueueFamilyIndices indices = RenderCore::Get().GetQueue().FindQueueFamilies(device); - bool extensions_supported = CheckDeviceExtensionSupport(device); - - VkPhysicalDeviceProperties props; - vkGetPhysicalDeviceProperties(device, &props); - if(!indices.IsComplete() || !extensions_supported) - return -1; - - VkPhysicalDeviceFeatures features; - vkGetPhysicalDeviceFeatures(device, &features); - - int score = 0; - #ifndef FORCE_INTEGRATED_GPU - if(props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) - score += 1000; - #else - if(props.deviceType != VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU) - return -1; - #endif - - 
if(!features.geometryShader) - return -1; - - score += props.limits.maxImageDimension2D; - score += props.limits.maxBoundDescriptorSets; - return score; - } - - bool Device::CheckDeviceExtensionSupport(VkPhysicalDevice device) - { - std::uint32_t extension_count; - vkEnumerateDeviceExtensionProperties(device, nullptr, &extension_count, nullptr); - - std::vector available_extensions(extensionCount); - vkEnumerateDeviceExtensionProperties(device, nullptr, &extension_count, available_extensions.data()); - - std::set required_extensions(device_extensions.begin(), device_extensions.end()); - - for(const auto& extension : available_extensions) - required_extensions.erase(extension.extensionName); - - return required_extensions.empty(); - } - - void Device::Destroy() noexcept - { - vkDestroyDevice(m_device, nullptr); - m_device = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed a logical device"); - } -} diff --git a/runtime/Sources/Renderer/Core/Fence.cpp b/runtime/Sources/Renderer/Core/Fence.cpp deleted file mode 100644 index 7db2b3d..0000000 --- a/runtime/Sources/Renderer/Core/Fence.cpp +++ /dev/null @@ -1,54 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Fence.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:53:06 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:13:09 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void Fence::Init() - { - VkFenceCreateInfo fence_info{}; - fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; - fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT; - - VkResult res; - if((res = vkCreateFence(RenderCore::Get().GetDevice().Get(), &fence_info, nullptr, &m_fence)) != VK_SUCCESS) - FatalError("Vulkan : failed to create a synchronization object (fence), %", VerbaliseVkResult(res)); - DebugLog("Vulkan : created new fence"); - } - - void Fence::Wait() noexcept - { - vkWaitForFences(RenderCore::Get().GetDevice().Get(), 1, &m_fence, VK_TRUE, UINT64_MAX); - } - - void Fence::Reset() noexcept - { - vkResetFences(RenderCore::Get().GetDevice().Get(), 1, &m_fence); - } - - bool Fence::IsReady() const noexcept - { - return vkGetFenceStatus(RenderCore::Get().GetDevice().Get(), m_fence) == VK_SUCCESS; - } - - void Fence::destroy() noexcept - { - if(m_fence != VK_NULL_HANDLE) - vkDestroyFence(RenderCore::Get().GetDevice().Get(), m_fence, nullptr); - m_fence = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed fence"); - } -} diff --git a/runtime/Sources/Renderer/Core/Instance.cpp b/runtime/Sources/Renderer/Core/Instance.cpp deleted file mode 100644 index 694e3b2..0000000 --- a/runtime/Sources/Renderer/Core/Instance.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Instance.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:04:21 by maldavid #+# #+# */ -/* Updated: 2024/04/23 18:43:47 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void Instance::Init() - { - std::uint32_t api_version = std::min(volkGetInstanceVersion(), MLX_TARGET_VULKAN_API_VERSION); - - if(api_version == 0) - FatalError("Vulkan API is not supported by this 
driver"); - - m_instance_version = api_version; - - VkApplicationInfo app_info{}; - app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; - app_info.pEngineName = "MacroLibX"; - app_info.engineVersion = MLX_VERSION; - app_info.apiVersion = api_version; - - auto extensions = GetRequiredExtensions(); - - VkInstanceCreateInfo create_info{}; - create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; - create_info.pApplicationInfo = &app_info; - create_info.enabledExtensionCount = static_cast(extensions.size()); - create_info.ppEnabledExtensionNames = extensions.data(); - create_info.enabledLayerCount = 0; // will be replaced if validation layers are enabled - create_info.pNext = nullptr; - - VkDebugUtilsMessengerCreateInfoEXT debug_create_info; - if constexpr(enable_validation_layers) - { - if(RenderCore::Get().GetLayers().CheckValidationLayerSupport()) - { - create_info.enabledLayerCount = static_cast(validation_layers.size()); - create_info.ppEnabledLayerNames = validation_layers.data(); - RenderCore::Get().GetLayers().PopulateDebugMessengerCreateInfo(debug_create_info); - create_info.pNext = static_cast(&debug_create_info); - } - } - - VkResult res; - if((res = vkCreateInstance(&create_info, nullptr, &m_instance)) != VK_SUCCESS) - FatalError("Vulkan : failed to create Vulkan instance, %", VerbaliseVkResult(res)); - volkLoadInstance(m_instance); - DebugLog("Vulkan : created new instance"); - } - - std::vector Instance::GetRequiredExtensions() - { - std::uint32_t glfw_extension_count = 0; - const char** glfw_extensions = glfwGetRequiredInstanceExtensions(&glfw_extension_count); - - std::vector extensions(glfw_extensions, glfw_extensions + glfw_extension_count); - - extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME); - - if constexpr(enableValidationLayers) - { - extensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); - extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); - } - - return extensions; - } - - void Instance::Destroy() noexcept - { - vkDestroyInstance(m_instance, nullptr); - m_instance = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed an instance"); - } -} diff --git a/runtime/Sources/Renderer/Core/Memory.cpp b/runtime/Sources/Renderer/Core/Memory.cpp deleted file mode 100644 index 80ce793..0000000 --- a/runtime/Sources/Renderer/Core/Memory.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Memory.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: kbz_8 +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/10/20 22:02:37 by kbz_8 #+# #+# */ -/* Updated: 2024/04/23 18:49:10 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -#define VK_NO_PROTOTYPES -#define VMA_STATIC_VULKAN_FUNCTIONS 0 -#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0 -#define VMA_ASSERT(expr) ((void)0) -#define VMA_IMPLEMENTATION - -#ifdef MLX_COMPILER_CLANG - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Weverything" - #include - #pragma clang diagnostic pop -#elif defined(MLX_COMPILER_GCC) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" - #pragma GCC diagnostic ignored "-Wmissing-field-initializers" - #pragma GCC diagnostic ignored "-Wunused-parameter" - #pragma GCC diagnostic ignored "-Wunused-variable" - #pragma GCC diagnostic ignored "-Wparentheses" - #include - #pragma GCC diagnostic pop -#else - #include -#endif - -#include -#include - -namespace mlx -{ - void 
diff --git a/runtime/Sources/Renderer/Core/Memory.cpp b/runtime/Sources/Renderer/Core/Memory.cpp
deleted file mode 100644
index 80ce793..0000000
--- a/runtime/Sources/Renderer/Core/Memory.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-#include
-
-#include
-
-#define VK_NO_PROTOTYPES
-#define VMA_STATIC_VULKAN_FUNCTIONS 0
-#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0
-#define VMA_ASSERT(expr) ((void)0)
-#define VMA_IMPLEMENTATION
-
-#ifdef MLX_COMPILER_CLANG
-	#pragma clang diagnostic push
-	#pragma clang diagnostic ignored "-Weverything"
-	#include <vk_mem_alloc.h>
-	#pragma clang diagnostic pop
-#elif defined(MLX_COMPILER_GCC)
-	#pragma GCC diagnostic push
-	#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
-	#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
-	#pragma GCC diagnostic ignored "-Wunused-parameter"
-	#pragma GCC diagnostic ignored "-Wunused-variable"
-	#pragma GCC diagnostic ignored "-Wparentheses"
-	#include <vk_mem_alloc.h>
-	#pragma GCC diagnostic pop
-#else
-	#include <vk_mem_alloc.h>
-#endif
-
-#include
-#include
-
-namespace mlx
-{
-	void GPUallocator::Init() noexcept
-	{
-		VmaVulkanFunctions vma_vulkan_func{};
-		vma_vulkan_func.vkAllocateMemory = vkAllocateMemory;
-		vma_vulkan_func.vkBindBufferMemory = vkBindBufferMemory;
-		vma_vulkan_func.vkBindImageMemory = vkBindImageMemory;
-		vma_vulkan_func.vkCreateBuffer = vkCreateBuffer;
-		vma_vulkan_func.vkCreateImage = vkCreateImage;
-		vma_vulkan_func.vkDestroyBuffer = vkDestroyBuffer;
-		vma_vulkan_func.vkDestroyImage = vkDestroyImage;
-		vma_vulkan_func.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
-		vma_vulkan_func.vkFreeMemory = vkFreeMemory;
-		vma_vulkan_func.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
-		vma_vulkan_func.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
-		vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
-		vma_vulkan_func.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
-		vma_vulkan_func.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
-		vma_vulkan_func.vkMapMemory = vkMapMemory;
-		vma_vulkan_func.vkUnmapMemory = vkUnmapMemory;
-		vma_vulkan_func.vkCmdCopyBuffer = vkCmdCopyBuffer;
-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-		vma_vulkan_func.vkGetBufferMemoryRequirements2KHR = vkGetBufferMemoryRequirements2;
-		vma_vulkan_func.vkGetImageMemoryRequirements2KHR = vkGetImageMemoryRequirements2;
-#endif
-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-		vma_vulkan_func.vkBindBufferMemory2KHR = vkBindBufferMemory2;
-		vma_vulkan_func.vkBindImageMemory2KHR = vkBindImageMemory2;
-#endif
-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-		vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties2KHR = vkGetPhysicalDeviceMemoryProperties2;
-#endif
-#if VMA_VULKAN_VERSION >= 1003000
-		vma_vulkan_func.vkGetDeviceBufferMemoryRequirements = vkGetDeviceBufferMemoryRequirements;
-		vma_vulkan_func.vkGetDeviceImageMemoryRequirements = vkGetDeviceImageMemoryRequirements;
-#endif
-
-		VmaAllocatorCreateInfo allocator_create_info{};
-		allocator_create_info.vulkanApiVersion = RenderCore::Get().GetInstance().GetInstanceVersion();
-		allocator_create_info.physicalDevice = RenderCore::Get().GetDevice().GetPhysicalDevice();
-		allocator_create_info.device = RenderCore::Get().GetDevice().Get();
-		allocator_create_info.instance = RenderCore::Get().GetInstance().Get();
-		allocator_create_info.pVulkanFunctions = &vma_vulkan_func;
-
-		VkResult res = vmaCreateAllocator(&allocator_create_info, &m_allocator);
-		if(res != VK_SUCCESS)
-			FatalError("Graphics allocator : failed to create graphics memory allocator, %", VerbaliseVkResult(res));
-		DebugLog("Graphics allocator : created new allocator");
-	}
-
-	VmaAllocation GPUallocator::CreateBuffer(const VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		VmaAllocation allocation;
-		VkResult res = vmaCreateBuffer(m_allocator, binfo, vinfo, &buffer, &allocation, nullptr);
-		if(res != VK_SUCCESS)
-			FatalError("Graphics allocator : failed to allocate a buffer, %", VerbaliseVkResult(res));
-		if(name != nullptr)
-		{
-			RenderCore::Get().GetLayers().SetDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_BUFFER, (std::uint64_t)buffer, name);
-			vmaSetAllocationName(m_allocator, allocation, name);
-		}
-		DebugLog("Graphics Allocator : created new buffer '%'", name);
-		m_active_buffers_allocations++;
-		return allocation;
-	}
-
-	void GPUallocator::DestroyBuffer(VmaAllocation allocation, VkBuffer buffer) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		vkDeviceWaitIdle(RenderCore::Get().GetDevice().Get());
-		vmaDestroyBuffer(m_allocator, buffer, allocation);
-		DebugLog("Graphics Allocator : destroyed buffer");
-		m_active_buffers_allocations--;
-	}
-
-	VmaAllocation GPUallocator::CreateImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		VmaAllocation allocation;
-		VkResult res = vmaCreateImage(m_allocator, iminfo, vinfo, &image, &allocation, nullptr);
-		if(res != VK_SUCCESS)
-			FatalError("Graphics allocator : failed to allocate an image, %", VerbaliseVkResult(res));
-		if(name != nullptr)
-		{
-			RenderCore::Get().GetLayers().SetDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_IMAGE, (std::uint64_t)image, name);
-			vmaSetAllocationName(m_allocator, allocation, name);
-		}
-		DebugLog("Graphics Allocator : created new image '%'", name);
-		m_active_images_allocations++;
-		return allocation;
-	}
-
-	void GPUallocator::DestroyImage(VmaAllocation allocation, VkImage image) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		vkDeviceWaitIdle(RenderCore::Get().GetDevice().Get());
-		vmaDestroyImage(m_allocator, image, allocation);
-		DebugLog("Graphics Allocator : destroyed image");
-		m_active_images_allocations--;
-	}
-
-	void GPUallocator::MapMemory(VmaAllocation allocation, void** data) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		VkResult res = vmaMapMemory(m_allocator, allocation, data);
-		if(res != VK_SUCCESS)
-			FatalError("Graphics allocator : unable to map GPU memory to CPU memory, %", VerbaliseVkResult(res));
-	}
-
-	void GPUallocator::UnmapMemory(VmaAllocation allocation) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		vmaUnmapMemory(m_allocator, allocation);
-	}
-
-	void GPUallocator::DumpMemoryToJson()
-	{
-		static std::uint32_t id = 0;
-		std::string name("memory_dump");
-		name.append(std::to_string(id) + ".json");
-		std::ofstream file(name);
-		if(!file.is_open())
-		{
-			Error("Graphics allocator : unable to dump memory to a json file");
-			return;
-		}
-		char* str = nullptr;
-		vmaBuildStatsString(m_allocator, &str, true);
-		file << str;
-		vmaFreeStatsString(m_allocator, str);
-		file.close();
-		id++;
-	}
-
-	void GPUallocator::Flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		vmaFlushAllocation(m_allocator, allocation, offset, size);
-	}
-
-	void GPUallocator::Destroy() noexcept
-	{
-		if(m_active_images_allocations != 0)
-			Error("Graphics allocator : some user-dependent allocations were not freed before destroying the display (% active allocations). You may not have destroyed all the MLX resources you created", m_active_images_allocations);
-		else if(m_active_buffers_allocations != 0)
-			Error("Graphics allocator : some MLX-dependent allocations were not freed before destroying the display (% active allocations). This is an error in the MLX, please report it; this should not happen", m_active_buffers_allocations);
-		if(m_active_images_allocations < 0 || m_active_buffers_allocations < 0)
-			Warning("Graphics allocator : the impossible happened, the MLX has freed more allocations than it has made (wtf)");
-		vmaDestroyAllocator(m_allocator);
-		m_active_buffers_allocations = 0;
-		m_active_images_allocations = 0;
-		DebugLog("Vulkan : destroyed a graphics allocator");
-	}
-}
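With both VMA_STATIC_VULKAN_FUNCTIONS and VMA_DYNAMIC_VULKAN_FUNCTIONS set to 0, VMA receives no Vulkan entry points on its own; the table built in Init() hands over the pointers volk resolved. A hedged usage sketch for the allocator above, where 'allocator' stands for the instance owned by RenderCore and the buffer parameters are made up:

    VkBufferCreateInfo binfo{};
    binfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    binfo.size = 64 * 1024;
    binfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo vinfo{};
    vinfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;

    VkBuffer buffer;
    VmaAllocation allocation = allocator.CreateBuffer(&binfo, &vinfo, buffer, "scratch");
    // ... use the buffer ...
    allocator.DestroyBuffer(allocation, buffer); // keeps the live-allocation counters balanced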
diff --git a/runtime/Sources/Renderer/Core/Queues.cpp b/runtime/Sources/Renderer/Core/Queues.cpp
deleted file mode 100644
index b1b7ae8..0000000
--- a/runtime/Sources/Renderer/Core/Queues.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-#include
-
-#include
-
-namespace mlx
-{
-	Queues::QueueFamilyIndices Queues::FindQueueFamilies(VkPhysicalDevice device)
-	{
-		std::uint32_t queue_family_count = 0;
-		vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, nullptr);
-
-		std::vector<VkQueueFamilyProperties> queue_families(queue_family_count);
-		vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families.data());
-
-		m_families = Queues::QueueFamilyIndices{};
-		int i = 0;
-		for(const auto& queue_family : queue_families)
-		{
-			if(queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT)
-				m_families->graphics_family = i;
-
-			if(glfwGetPhysicalDevicePresentationSupport(RenderCore::Get().GetInstance().Get(), device, i))
-				m_families->present_family = i;
-
-			if(m_families->IsComplete())
-				return *m_families;
-			i++;
-		}
-
-		return *m_families;
-	}
-
-	void Queues::Init()
-	{
-		if(!m_families.has_value())
-			FindQueueFamilies(RenderCore::Get().GetDevice().GetPhysicalDevice());
-		vkGetDeviceQueue(RenderCore::Get().GetDevice().Get(), m_families->graphics_family.value(), 0, &m_graphics_queue);
-		vkGetDeviceQueue(RenderCore::Get().GetDevice().Get(), m_families->present_family.value(), 0, &m_present_queue);
-		DebugLog("Vulkan : got graphics and present queues");
-	}
-}
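FindQueueFamilies tracks graphics and present support separately because they can live on different queue families; IsComplete() gates the early exit once both are found. The QueueFamilyIndices type itself is not part of this hunk, but its presumed shape follows from the .has_value() and .value() calls:

    // Presumed shape of Queues::QueueFamilyIndices (sketch, header not shown);
    // std::optional distinguishes "not found yet" from a valid index of 0.
    struct QueueFamilyIndices
    {
        std::optional<std::uint32_t> graphics_family;
        std::optional<std::uint32_t> present_family;

        bool IsComplete() const { return graphics_family.has_value() && present_family.has_value(); }
    };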
diff --git a/runtime/Sources/Renderer/Core/RenderCore.cpp b/runtime/Sources/Renderer/Core/RenderCore.cpp
deleted file mode 100644
index 44ced4a..0000000
--- a/runtime/Sources/Renderer/Core/RenderCore.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-#define VK_NO_PROTOTYPES
-#define VOLK_IMPLEMENTATION
-#include <volk.h>
-
-#include
-
-#include
-#include
-
-#ifdef DEBUG
-	#ifdef MLX_COMPILER_MSVC
-		#pragma NOTE("MLX is being compiled in debug mode, this activates Vulkan's validation layers and debug messages which may impact rendering performance")
-	#else
-		#warning "MLX is being compiled in debug mode, this activates Vulkan's validation layers and debug messages which may impact rendering performance"
-	#endif
-#endif
-
-namespace mlx
-{
-	const char* VerbaliseVkResult(VkResult result)
-	{
-		switch(result)
-		{
-			case VK_SUCCESS: return "Success";
-			case VK_NOT_READY: return "A fence or query has not yet completed";
-			case VK_TIMEOUT: return "A wait operation has not completed in the specified time";
-			case VK_EVENT_SET: return "An event is signaled";
-			case VK_EVENT_RESET: return "An event is unsignaled";
-			case VK_INCOMPLETE: return "A return array was too small for the result";
-			case VK_ERROR_OUT_OF_HOST_MEMORY: return "A host memory allocation has failed";
-			case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "A device memory allocation has failed";
-			case VK_ERROR_INITIALIZATION_FAILED: return "Initialization of an object could not be completed for implementation-specific reasons";
-			case VK_ERROR_DEVICE_LOST: return "The logical or physical device has been lost";
-			case VK_ERROR_MEMORY_MAP_FAILED: return "Mapping of a memory object has failed";
-			case VK_ERROR_LAYER_NOT_PRESENT: return "A requested layer is not present or could not be loaded";
-			case VK_ERROR_EXTENSION_NOT_PRESENT: return "A requested extension is not supported";
-			case VK_ERROR_FEATURE_NOT_PRESENT: return "A requested feature is not supported";
-			case VK_ERROR_INCOMPATIBLE_DRIVER: return "The requested version of Vulkan is not supported by the driver or is otherwise incompatible";
-			case VK_ERROR_TOO_MANY_OBJECTS: return "Too many objects of the type have already been created";
-			case VK_ERROR_FORMAT_NOT_SUPPORTED: return "A requested format is not supported on this device";
-			case VK_ERROR_SURFACE_LOST_KHR: return "A surface is no longer available";
-			case VK_SUBOPTIMAL_KHR: return "A swapchain no longer matches the surface properties exactly, but can still be used";
-			case VK_ERROR_OUT_OF_DATE_KHR: return "A surface has changed in such a way that it is no longer compatible with the swapchain";
-			case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "The display used by a swapchain does not use the same presentable image layout";
-			case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "The requested window is already connected to a VkSurfaceKHR, or to some other non-Vulkan API";
-			case VK_ERROR_VALIDATION_FAILED_EXT: return "A validation layer found an error";
-
-			default: return "Unknown Vulkan error";
-		}
-		return nullptr;
-	}
-
-	VkPipelineStageFlags AccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags)
-	{
-		VkPipelineStageFlags stages = 0;
-
-		while(access_flags != 0)
-		{
-			VkAccessFlagBits access_flag = static_cast<VkAccessFlagBits>(access_flags & (~(access_flags - 1)));
-			if(access_flag == 0 || (access_flag & (access_flag - 1)) != 0)
-				FatalError("Vulkan : an error has been caught during access flag to pipeline stage operation");
-			access_flags &= ~access_flag;
-
-			switch(access_flag)
-			{
-				case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: stages |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; break;
-				case VK_ACCESS_INDEX_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break;
-				case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break;
-				case VK_ACCESS_UNIFORM_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
-				case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break;
-				case VK_ACCESS_SHADER_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
-				case VK_ACCESS_SHADER_WRITE_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
-				case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break;
-				case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break;
-				case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break;
-				case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break;
-				case VK_ACCESS_TRANSFER_READ_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break;
-				case VK_ACCESS_TRANSFER_WRITE_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break;
-				case VK_ACCESS_HOST_READ_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break;
-				case VK_ACCESS_HOST_WRITE_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break;
-				case VK_ACCESS_MEMORY_READ_BIT: break;
-				case VK_ACCESS_MEMORY_WRITE_BIT: break;
-
-				default: Error("Vulkan : unknown access flag"); break;
-			}
-		}
-		return stages;
-	}
-
-	void RenderCore::Init()
-	{
-		if(volkInitialize() != VK_SUCCESS)
-			FatalError("Vulkan loader : cannot load %, are you sure Vulkan is installed on your system?", VULKAN_LIB_NAME);
-
-		m_instance.Init();
-		volkLoadInstance(m_instance.Get());
-		m_layers.Init();
-		m_device.Init();
-		volkLoadDevice(m_device.Get());
-		m_queues.Init();
-		m_allocator.Init();
-		m_cmd_manager.Init();
-		m_is_init = true;
-	}
-
-	void RenderCore::Destroy()
-	{
-		if(!m_is_init)
-			return;
-
-		vkDeviceWaitIdle(m_device.Get());
-
-		m_pool_manager.DestroyAllPools();
-		m_cmd_manager.Destroy();
-		m_allocator.Destroy();
-		m_device.Destroy();
-		m_layers.Destroy();
-		m_instance.Destroy();
-
-		m_is_init = false;
-	}
-}
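RenderCore::Init() follows the bring-up order volk requires: the loader is initialized before any Vulkan call, and each creation step unlocks the next batch of function pointers. In outline:

    volkInitialize();              // grabs vkGetInstanceProcAddr from the system loader
    /* vkCreateInstance(...) */
    volkLoadInstance(instance);    // resolves instance-level functions, needed before device creation
    /* vkCreateDevice(...) */
    volkLoadDevice(device);        // resolves device-level functions, bypassing dispatch trampolines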
diff --git a/runtime/Sources/Renderer/Core/Semaphore.cpp b/runtime/Sources/Renderer/Core/Semaphore.cpp
deleted file mode 100644
index 8a9d72a..0000000
--- a/runtime/Sources/Renderer/Core/Semaphore.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include
-#include
-#include
-
-namespace mlx
-{
-	void Semaphore::Init()
-	{
-		VkSemaphoreCreateInfo semaphore_info{};
-		semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-
-		VkResult res;
-		if((res = vkCreateSemaphore(RenderCore::Get().GetDevice().Get(), &semaphore_info, nullptr, &m_semaphore)) != VK_SUCCESS)
-			FatalError("Vulkan : failed to create a synchronization object (semaphore), %", VerbaliseVkResult(res));
-		DebugLog("Vulkan : created new semaphore");
-	}
-
-	void Semaphore::Destroy() noexcept
-	{
-		vkDestroySemaphore(RenderCore::Get().GetDevice().Get(), m_semaphore, nullptr);
-		m_semaphore = VK_NULL_HANDLE;
-		DebugLog("Vulkan : destroyed semaphore");
-	}
-}
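Fences synchronize GPU work with the CPU while semaphores order work on the GPU itself; note the fence above is created with VK_FENCE_CREATE_SIGNALED_BIT so the very first frame's Wait() returns immediately. An illustrative frame loop these primitives serve (a sketch of the usual pattern, not the MLX renderer's actual code):

    fence.Wait();   // created signaled, so frame 0 does not deadlock here
    fence.Reset();
    // vkAcquireNextImageKHR(..., image_available_semaphore, ...);
    // vkQueueSubmit(... wait: image_available, signal: render_finished, fence ...);
    // vkQueuePresentKHR(... wait: render_finished ...);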
diff --git a/runtime/Sources/Renderer/Core/Surface.cpp b/runtime/Sources/Renderer/Core/Surface.cpp
deleted file mode 100644
index 4715afe..0000000
--- a/runtime/Sources/Renderer/Core/Surface.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-#include
-#include "render_core.h"
-#include
-#include
-
-namespace mlx
-{
-	void Surface::Create(Renderer& renderer)
-	{
-		if(glfwCreateWindowSurface(RenderCore::Get().GetInstance().Get(), renderer.GetWindow()->GetNativeWindow(), nullptr, &m_surface) != VK_SUCCESS)
-			FatalError("Vulkan : failed to create a surface");
-		DebugLog("Vulkan : created new surface");
-	}
-
-	VkSurfaceFormatKHR Surface::ChooseSwapSurfaceFormat(const std::vector<VkSurfaceFormatKHR>& available_formats)
-	{
-		auto it = std::find_if(available_formats.begin(), available_formats.end(), [](VkSurfaceFormatKHR format)
-		{
-			return format.format == VK_FORMAT_R8G8B8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
-		});
-
-		return (it == available_formats.end() ? available_formats[0] : *it);
-	}
-
-	void Surface::Destroy() noexcept
-	{
-		vkDestroySurfaceKHR(RenderCore::Get().GetInstance().Get(), m_surface, nullptr);
-		m_surface = VK_NULL_HANDLE;
-		DebugLog("Vulkan : destroyed a surface");
-	}
-}
diff --git a/runtime/Sources/Renderer/Core/ValidationLayers.cpp b/runtime/Sources/Renderer/Core/ValidationLayers.cpp
deleted file mode 100644
index d64a033..0000000
--- a/runtime/Sources/Renderer/Core/ValidationLayers.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-#include
-#include
-
-namespace mlx
-{
-	void ValidationLayers::Init()
-	{
-		if constexpr(!enable_validation_layers)
-			return;
-
-		std::uint32_t extension_count;
-		vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr);
-		std::vector<VkExtensionProperties> extensions(extension_count);
-		vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extensions.data());
-		if(!std::any_of(extensions.begin(), extensions.end(), [=](VkExtensionProperties ext) { return std::strcmp(ext.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0; }))
-		{
-			Warning("Vulkan : % not present, debug utils are disabled", VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
-			return;
-		}
-
-		VkDebugUtilsMessengerCreateInfoEXT create_info{};
-		PopulateDebugMessengerCreateInfo(create_info);
-		VkResult res = CreateDebugUtilsMessengerEXT(&create_info, nullptr);
-		if(res != VK_SUCCESS)
-			Warning("Vulkan : failed to set up debug messenger, %", VerbaliseVkResult(res));
-		else
-			DebugLog("Vulkan : enabled validation layers");
-
-		f_vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT)vkGetInstanceProcAddr(RenderCore::Get().GetInstance().Get(), "vkSetDebugUtilsObjectNameEXT");
-		if(!f_vkSetDebugUtilsObjectNameEXT)
-			Warning("Vulkan : failed to set up debug object names, %", VerbaliseVkResult(VK_ERROR_EXTENSION_NOT_PRESENT));
-		else
-			DebugLog("Vulkan : enabled debug object names");
-	}
-
-	bool ValidationLayers::CheckValidationLayerSupport()
-	{
-		std::uint32_t layer_count;
-		vkEnumerateInstanceLayerProperties(&layer_count, nullptr);
-
-		std::vector<VkLayerProperties> available_layers(layer_count);
-		vkEnumerateInstanceLayerProperties(&layer_count, available_layers.data());
-
-		return std::all_of(validation_layers.begin(), validation_layers.end(), [&](const char* layer_name)
-		{
-			if(!std::any_of(available_layers.begin(), available_layers.end(), [=](VkLayerProperties props) { return std::strcmp(layer_name, props.layerName) == 0; }))
-			{
-				Error("Vulkan : a validation layer was requested but was not found ('%')", layer_name);
-				return false;
-			}
-			return true;
-		});
-	}
-
-	VkResult ValidationLayers::SetDebugUtilsObjectNameEXT(VkObjectType object_type, std::uint64_t object_handle, const char* object_name)
-	{
-		if(!f_vkSetDebugUtilsObjectNameEXT)
-			return VK_ERROR_EXTENSION_NOT_PRESENT;
-
-		VkDebugUtilsObjectNameInfoEXT name_info{};
-		name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
-		name_info.objectType = object_type;
-		name_info.objectHandle = object_handle;
-		name_info.pObjectName = object_name;
-		return f_vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice().Get(), &name_info);
-	}
-
-	void ValidationLayers::PopulateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT& create_info)
-	{
-		create_info = {};
-		create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
-		create_info.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
-		create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
-		create_info.pfnUserCallback = ValidationLayers::DebugCallback;
-	}
-
-	void ValidationLayers::Destroy()
-	{
-		if constexpr(enable_validation_layers)
-		{
-			DestroyDebugUtilsMessengerEXT(nullptr);
-			#ifdef DEBUG
-				DebugLog("Vulkan : destroyed validation layers");
-			#endif
-		}
-	}
-
-	VkResult ValidationLayers::CreateDebugUtilsMessengerEXT(const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator)
-	{
-		auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(RenderCore::Get().GetInstance().Get(), "vkCreateDebugUtilsMessengerEXT");
-		return func != nullptr ? func(RenderCore::Get().GetInstance().Get(), pCreateInfo, pAllocator, &m_debug_messenger) : VK_ERROR_EXTENSION_NOT_PRESENT;
-	}
-
-	VKAPI_ATTR VkBool32 VKAPI_CALL ValidationLayers::DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, [[maybe_unused]] VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, [[maybe_unused]] void* pUserData)
-	{
-		if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
-			Error(pCallbackData->pMessage);
-		else if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT)
-			Warning(pCallbackData->pMessage);
-		return VK_FALSE;
-	}
-
-	void ValidationLayers::DestroyDebugUtilsMessengerEXT(const VkAllocationCallbacks* pAllocator)
-	{
-		auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(RenderCore::Get().GetInstance().Get(), "vkDestroyDebugUtilsMessengerEXT");
-		if(func != nullptr)
-			func(RenderCore::Get().GetInstance().Get(), m_debug_messenger, pAllocator);
-	}
-}
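Extension entry points such as vkCreateDebugUtilsMessengerEXT are not exported by the loader, which is why every call above takes a vkGetInstanceProcAddr round-trip; a null return simply means the extension is absent. The pattern generalizes (sketch):

    // Generic loader for any instance-level extension function.
    template<typename PFN>
    PFN LoadInstanceFunction(VkInstance instance, const char* name)
    {
        return reinterpret_cast<PFN>(vkGetInstanceProcAddr(instance, name)); // nullptr => extension missing
    }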
diff --git a/runtime/Sources/Renderer/Descriptor.cpp b/runtime/Sources/Renderer/Descriptor.cpp
new file mode 100644
index 0000000..8d4ee93
--- /dev/null
+++ b/runtime/Sources/Renderer/Descriptor.cpp
@@ -0,0 +1,141 @@
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+namespace mlx
+{
+	void TransitionImageToCorrectLayout(Image& image, VkCommandBuffer cmd)
+	{
+		if(!image.IsInit())
+			return;
+		if(image.GetType() == ImageType::Color)
+			image.TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, cmd);
+		else
+			Error("Vulkan : cannot transition descriptor image layout, unknown image type");
+	}
+
+	DescriptorSet::DescriptorSet(const ShaderSetLayout& layout, VkDescriptorSetLayout vklayout, ShaderType shader_type)
+	: m_set_layout(vklayout)
+	{
+		for(auto& [binding, type] : layout.binds)
+		{
+			m_descriptors.emplace_back();
+			m_descriptors.back().type = type;
+			m_descriptors.back().shader_type = shader_type;
+			m_descriptors.back().binding = binding;
+		}
+		for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
+			m_set[i] = kvfAllocateDescriptorSet(RenderCore::Get().GetDevice(), vklayout);
+	}
+
+	DescriptorSet::DescriptorSet(VkDescriptorSetLayout layout, const std::vector<Descriptor>& descriptors)
+	: m_set_layout(layout), m_descriptors(descriptors)
+	{
+		for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
+			m_set[i] = kvfAllocateDescriptorSet(RenderCore::Get().GetDevice(), layout);
+	}
+
+	void DescriptorSet::SetImage(std::size_t i, std::uint32_t binding, class Image& image)
+	{
+		Verify(m_set[i] != VK_NULL_HANDLE, "invalid descriptor");
+		auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor)
+		{
+			return binding == descriptor.binding;
+		});
+		if(it == m_descriptors.end())
+		{
+			Warning("Vulkan : cannot update descriptor set image; invalid binding");
+			return;
+		}
+		if(it->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+		{
+			Error("Vulkan : trying to bind an image to the wrong descriptor");
+			return;
+		}
+		it->image_ptr = &image;
+	}
+
+	void DescriptorSet::SetStorageBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer)
+	{
+		Verify(m_set[i] != VK_NULL_HANDLE, "invalid descriptor");
+		auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor)
+		{
+			return binding == descriptor.binding;
+		});
+		if(it == m_descriptors.end())
+		{
+			Warning("Vulkan : cannot update descriptor set buffer; invalid binding");
+			return;
+		}
+		if(it->type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
+		{
+			Error("Vulkan : trying to bind a buffer to the wrong descriptor");
+			return;
+		}
+		it->storage_buffer_ptr = &buffer;
+	}
+
+	void DescriptorSet::SetUniformBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer)
+	{
+		Verify(m_set[i] != VK_NULL_HANDLE, "invalid descriptor");
+		auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor)
+		{
+			return binding == descriptor.binding;
+		});
+		if(it == m_descriptors.end())
+		{
+			Warning("Vulkan : cannot update descriptor set buffer; invalid binding");
+			return;
+		}
+		if(it->type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
+		{
+			Error("Vulkan : trying to bind a buffer to the wrong descriptor");
+			return;
+		}
+		it->uniform_buffer_ptr = &buffer;
+	}
+
+	void DescriptorSet::Update(std::size_t i, VkCommandBuffer cmd) noexcept
+	{
+		Verify(m_set[i] != VK_NULL_HANDLE, "invalid descriptor");
+		std::vector<VkWriteDescriptorSet> writes;
+		std::vector<VkDescriptorBufferInfo> buffer_infos;
+		std::vector<VkDescriptorImageInfo> image_infos;
+		// Reserve up front so the &back() pointers handed to the write structs
+		// stay valid; a push_back reallocation would leave them dangling.
+		writes.reserve(m_descriptors.size());
+		buffer_infos.reserve(m_descriptors.size());
+		image_infos.reserve(m_descriptors.size());
+		for(auto& descriptor : m_descriptors)
+		{
+			if(descriptor.image_ptr)
+			{
+				TransitionImageToCorrectLayout(*descriptor.image_ptr, cmd);
+				VkDescriptorImageInfo info{};
+				info.sampler = descriptor.image_ptr->GetSampler();
+				info.imageLayout = descriptor.image_ptr->GetLayout();
+				info.imageView = descriptor.image_ptr->GetImageView();
+				image_infos.push_back(info);
+				writes.push_back(kvfWriteImageToDescriptorSet(RenderCore::Get().GetDevice(), m_set[i], &image_infos.back(), descriptor.binding));
+			}
+			else if(descriptor.uniform_buffer_ptr)
+			{
+				VkDescriptorBufferInfo info{};
+				info.buffer = descriptor.uniform_buffer_ptr->Get();
+				info.offset = descriptor.uniform_buffer_ptr->GetOffset();
+				info.range = VK_WHOLE_SIZE;
+				buffer_infos.push_back(info);
+				writes.push_back(kvfWriteUniformBufferToDescriptorSet(RenderCore::Get().GetDevice(), m_set[i], &buffer_infos.back(), descriptor.binding));
+			}
+			else if(descriptor.storage_buffer_ptr)
+			{
+				VkDescriptorBufferInfo info{};
+				info.buffer = descriptor.storage_buffer_ptr->Get();
+				info.offset = descriptor.storage_buffer_ptr->GetOffset();
+				info.range = VK_WHOLE_SIZE;
+				buffer_infos.push_back(info);
+				writes.push_back(kvfWriteStorageBufferToDescriptorSet(RenderCore::Get().GetDevice(), m_set[i], &buffer_infos.back(), descriptor.binding));
+			}
+		}
+		vkUpdateDescriptorSets(RenderCore::Get().GetDevice(), static_cast<std::uint32_t>(writes.size()), writes.data(), 0, nullptr);
+	}
+}
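The reserve() calls in Update() are load-bearing: each VkWriteDescriptorSet keeps a raw pointer into buffer_infos or image_infos, and vkUpdateDescriptorSets only dereferences those pointers at the end. Without a capacity guarantee, any later push_back can reallocate the vector and invalidate every pointer already captured:

    // The hazard the reserves prevent (self-contained illustration):
    VkDescriptorBufferInfo a{}, b{};
    std::vector<VkDescriptorBufferInfo> infos;
    infos.push_back(a);
    const VkDescriptorBufferInfo* p = &infos.back();
    infos.push_back(b); // may reallocate: 'p' now dangles inside a pending write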
diff --git a/runtime/Sources/Renderer/Descriptors/DescriptorPool.cpp b/runtime/Sources/Renderer/Descriptors/DescriptorPool.cpp
deleted file mode 100644
index 2cd8085..0000000
--- a/runtime/Sources/Renderer/Descriptors/DescriptorPool.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-#include
-
-#include
-#include
-#include
-
-namespace mlx
-{
-	void DescriptorPool::Init(std::vector<VkDescriptorPoolSize> sizes)
-	{
-		VkDescriptorPoolCreateInfo pool_info{};
-		pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
-		pool_info.poolSizeCount = static_cast<std::uint32_t>(sizes.size());
-		pool_info.pPoolSizes = sizes.data();
-		pool_info.maxSets = MAX_SETS_PER_POOL;
-		pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
-
-		VkResult res = vkCreateDescriptorPool(RenderCore::Get().GetDevice().Get(), &pool_info, nullptr, &m_pool);
-		if(res != VK_SUCCESS)
-			FatalError("Vulkan : failed to create descriptor pool, %", VerbaliseVkResult(res));
-		DebugLog("Vulkan : created new descriptor pool");
-	}
-
-	VkDescriptorSet DescriptorPool::AllocateDescriptorSet(class DescriptorSetLayout& layout)
-	{
-		VkDescriptorSet set;
-		VkDescriptorSetLayout vklayout = layout.Get();
-
-		VkDescriptorSetAllocateInfo alloc_info{};
-		alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-		alloc_info.descriptorPool = m_pool;
-		alloc_info.descriptorSetCount = 1;
-		alloc_info.pSetLayouts = &vklayout;
-
-		VkResult res = vkAllocateDescriptorSets(RenderCore::Get().GetDevice().Get(), &alloc_info, &set);
-		if(res != VK_SUCCESS)
-			FatalError("Vulkan : failed to allocate descriptor set, %", VerbaliseVkResult(res));
-		m_allocated_sets++;
-		DebugLog("Vulkan : created new descriptor set");
-		return set;
-	}
-
-	void DescriptorPool::FreeDescriptor(VkDescriptorSet set)
-	{
-		if(!IsInit())
-			return;
-		vkFreeDescriptorSets(RenderCore::Get().GetDevice().Get(), m_pool, 1, &set);
-		m_allocated_sets--; // if this underflows, I quit
-	}
-
-	void DescriptorPool::Destroy() noexcept
-	{
-		if(m_pool != VK_NULL_HANDLE)
-			vkDestroyDescriptorPool(RenderCore::Get().GetDevice().Get(), m_pool, nullptr);
-		m_pool = VK_NULL_HANDLE;
-		DebugLog("Vulkan : destroyed a descriptor pool");
-	}
-}
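Freeing individual sets back to a pool, as FreeDescriptor() does, is only legal because Init() creates the pool with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; without that flag the only way to reclaim sets is a bulk reset:

    // Two reclamation models for descriptor pools:
    vkFreeDescriptorSets(device, pool, 1, &set); // requires ..._FREE_DESCRIPTOR_SET_BIT at creation
    vkResetDescriptorPool(device, pool, 0);      // bulk reset, valid for any pool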
diff --git a/runtime/Sources/Renderer/Descriptors/DescriptorPoolManager.cpp b/runtime/Sources/Renderer/Descriptors/DescriptorPoolManager.cpp
deleted file mode 100644
index 081f887..0000000
--- a/runtime/Sources/Renderer/Descriptors/DescriptorPoolManager.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-#include
-
-#include
-#include
-
-namespace mlx
-{
-	DescriptorPool& DescriptorPoolManager::GetAvailablePool()
-	{
-		for(auto& pool : m_pools)
-		{
-			if(pool.GetNumberOfSetsAllocated() < MAX_SETS_PER_POOL)
-				return pool;
-		}
-		std::vector<VkDescriptorPoolSize> pool_sizes = {
-			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (MAX_FRAMES_IN_FLIGHT * NUMBER_OF_UNIFORM_BUFFERS) },
-			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, MAX_SETS_PER_POOL - (MAX_FRAMES_IN_FLIGHT * NUMBER_OF_UNIFORM_BUFFERS) }
-		};
-		m_pools.emplace_front().Init(std::move(pool_sizes));
-		return m_pools.front();
-	}
-
-	void DescriptorPoolManager::DestroyAllPools()
-	{
-		for(auto& pool : m_pools)
-			pool.Destroy();
-		m_pools.clear();
-	}
-}
diff --git a/runtime/Sources/Renderer/Descriptors/DescriptorSet.cpp b/runtime/Sources/Renderer/Descriptors/DescriptorSet.cpp
deleted file mode 100644
index 75d6427..0000000
--- a/runtime/Sources/Renderer/Descriptors/DescriptorSet.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace mlx
-{
-	void DescriptorSet::Init(NonOwningPtr<Renderer> renderer, NonOwningPtr<DescriptorPool> pool, DescriptorSetLayout layout)
-	{
-		MLX_PROFILE_FUNCTION();
-		p_renderer = renderer;
-		m_layout = layout;
-		p_pool = pool;
-
-		for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
-			m_desc_set[i] = pool->AllocateDescriptorSet(layout);
-	}
-
-	void DescriptorSet::WriteDescriptor(int binding, NonOwningPtr<UniformBuffer> ubo) const noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		auto device = RenderCore::Get().GetDevice().Get();
-
-		for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++)
-		{
-			VkDescriptorBufferInfo buffer_info{};
-			buffer_info.buffer = ubo->Get(i);
-			buffer_info.offset = ubo->GetOffset(i);
-			buffer_info.range = ubo->GetSize(i);
-
-			VkWriteDescriptorSet descriptor_write{};
-			descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
-			descriptor_write.dstSet = m_desc_set[i];
-			descriptor_write.dstBinding = binding;
-			descriptor_write.dstArrayElement = 0;
-			descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-			descriptor_write.descriptorCount = 1;
-			descriptor_write.pBufferInfo = &buffer_info;
-
-			vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, nullptr);
-		}
-	}
-
-	void DescriptorSet::WriteDescriptor(int binding, const Image& image) const noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		auto device = RenderCore::Get().GetDevice().Get();
-
-		VkDescriptorImageInfo image_info{};
-		image_info.imageLayout = image.GetLayout();
-		image_info.imageView = image.GetImageView();
-		image_info.sampler = image.GetSampler();
-
-		VkWriteDescriptorSet descriptor_write{};
-		descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
-		descriptor_write.dstSet = m_desc_set[p_renderer->GetActiveImageIndex()];
-		descriptor_write.dstBinding = binding;
-		descriptor_write.dstArrayElement = 0;
-		descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
-		descriptor_write.descriptorCount = 1;
-		descriptor_write.pImageInfo = &image_info;
-
-		vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, nullptr);
-	}
-
-	void DescriptorSet::Bind() noexcept
-	{
-		vkCmdBindDescriptorSets(p_renderer->GetActiveCmdBuffer().Get(), VK_PIPELINE_BIND_POINT_GRAPHICS, p_renderer->GetPipeline().GetPipelineLayout(), 0, 1, &m_desc_set[p_renderer->GetActiveImageIndex()], 0, nullptr);
-	}
-
-	DescriptorSet DescriptorSet::Duplicate()
-	{
-		MLX_PROFILE_FUNCTION();
-		DescriptorSet set;
-		set.Init(p_renderer, &RenderCore::Get().GetDescriptorPool(), m_layout);
-		return set;
-	}
-
-	VkDescriptorSet& DescriptorSet::operator()() noexcept
-	{
-		return m_desc_set[p_renderer->GetActiveImageIndex()];
-	}
-
-	VkDescriptorSet& DescriptorSet::Get() noexcept
-	{
-		return m_desc_set[p_renderer->GetActiveImageIndex()];
-	}
-
-	void DescriptorSet::Destroy() noexcept
-	{
-		MLX_PROFILE_FUNCTION();
-		if(p_pool != nullptr && RenderCore::Get().IsInit()) // checks that the render core is still init (it should always be, but just in case)
-			p_pool->FreeDescriptor(*this);
-		for(auto& set : m_desc_set)
-		{
-			if(set != VK_NULL_HANDLE)
-				set = VK_NULL_HANDLE;
-		}
-	}
-}
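The class keeps one VkDescriptorSet per frame in flight so a set still being read by frame N-1 on the GPU is never rewritten for frame N; every accessor routes through the renderer's active index. The pattern in miniature:

    // Per-frame copies, selected by the active frame index (as used above):
    VkDescriptorSet m_desc_set[MAX_FRAMES_IN_FLIGHT];
    VkDescriptorSet& CurrentSet() { return m_desc_set[p_renderer->GetActiveImageIndex()]; }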
diff --git a/runtime/Sources/Renderer/Descriptors/DescriptorSetLayout.cpp b/runtime/Sources/Renderer/Descriptors/DescriptorSetLayout.cpp
deleted file mode 100644
index cc61a34..0000000
--- a/runtime/Sources/Renderer/Descriptors/DescriptorSetLayout.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-#include
-
-#include
-#include
-
-namespace mlx
-{
-	void DescriptorSetLayout::Init(std::vector<std::pair<int, VkDescriptorType>> binds, VkShaderStageFlagBits stage)
-	{
-		std::vector<VkDescriptorSetLayoutBinding> bindings(binds.size());
-		for(std::size_t i = 0; i < binds.size(); i++)
-		{
-			bindings[i].binding = binds[i].first;
-			bindings[i].descriptorCount = 1;
-			bindings[i].descriptorType = binds[i].second;
-			bindings[i].pImmutableSamplers = nullptr;
-			bindings[i].stageFlags = stage;
-		}
-
-		m_bindings = std::move(binds);
-
-		VkDescriptorSetLayoutCreateInfo layout_info{};
-		layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-		layout_info.bindingCount = static_cast<std::uint32_t>(bindings.size());
-		layout_info.pBindings = bindings.data();
-
-		VkResult res = vkCreateDescriptorSetLayout(RenderCore::Get().GetDevice().Get(), &layout_info, nullptr, &m_layout);
-		if(res != VK_SUCCESS)
-			FatalError("Vulkan : failed to create descriptor set layout, %", VerbaliseVkResult(res));
-	}
-
-	void DescriptorSetLayout::Destroy() noexcept
-	{
-		vkDestroyDescriptorSetLayout(RenderCore::Get().GetDevice().Get(), m_layout, nullptr);
-		m_layout = VK_NULL_HANDLE;
-	}
-}
diff --git a/runtime/Sources/Renderer/Image.cpp b/runtime/Sources/Renderer/Image.cpp
new file mode 100644
index 0000000..5ee95d0
--- /dev/null
+++ b/runtime/Sources/Renderer/Image.cpp
@@ -0,0 +1,113 @@
+#include
+#include
+#include
+#include
+
+namespace mlx
+{
+	void Image::Init(ImageType type, std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties, bool is_multisampled)
+	{
+		m_type = type;
+		m_width = width;
+		m_height = height;
+		m_format = format;
+		m_tiling = tiling;
+		m_is_multisampled = is_multisampled;
+
+		VmaAllocationCreateInfo alloc_info{};
+		alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+
+		VkImageCreateInfo image_info{};
+		image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+		image_info.imageType = VK_IMAGE_TYPE_2D;
+		image_info.extent.width = width;
+		image_info.extent.height = height;
+		image_info.extent.depth = 1;
+		image_info.mipLevels = 1;
+		image_info.arrayLayers = 1;
+		image_info.format = format;
+		image_info.tiling = tiling;
+		image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+		image_info.usage = usage;
+		image_info.samples = (m_is_multisampled ? VK_SAMPLE_COUNT_4_BIT : VK_SAMPLE_COUNT_1_BIT);
+		image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+		m_allocation = RenderCore::Get().GetAllocator().CreateImage(&image_info, &alloc_info, &m_image);
+	}
+
+	void Image::CreateImageView(VkImageViewType type, VkImageAspectFlags aspect_flags, int layer_count) noexcept
+	{
+		m_image_view = kvfCreateImageView(RenderCore::Get().GetDevice(), m_image, m_format, type, aspect_flags, layer_count);
+	}
+
+	void Image::CreateSampler() noexcept
+	{
+		m_sampler = kvfCreateSampler(RenderCore::Get().GetDevice(), VK_FILTER_NEAREST, VK_SAMPLER_ADDRESS_MODE_REPEAT, VK_SAMPLER_MIPMAP_MODE_NEAREST);
+	}
+
+	void Image::TransitionLayout(VkImageLayout new_layout, VkCommandBuffer cmd)
+	{
+		if(new_layout == m_layout)
+			return;
+		bool is_single_time_cmd_buffer = (cmd == VK_NULL_HANDLE);
+		if(is_single_time_cmd_buffer)
+			cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice());
+		KvfImageType kvf_type = KVF_IMAGE_OTHER;
+		switch(m_type)
+		{
+			case ImageType::Color: kvf_type = KVF_IMAGE_COLOR; break;
+			default: break;
+		}
+		kvfTransitionImageLayout(RenderCore::Get().GetDevice(), m_image, kvf_type, cmd, m_format, m_layout, new_layout, is_single_time_cmd_buffer);
+		m_layout = new_layout;
+	}
+
+	void Image::Clear(VkCommandBuffer cmd, Vec4f color)
+	{
+		VkImageSubresourceRange subresource_range{};
+		subresource_range.baseMipLevel = 0;
+		subresource_range.layerCount = (m_type == ImageType::Cube ? 6 : 1);
+		subresource_range.levelCount = 1;
+		subresource_range.baseArrayLayer = 0;
+
+		if(m_type == ImageType::Color || m_type == ImageType::Cube)
+		{
+			VkImageLayout old_layout = m_layout;
+			TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd);
+			subresource_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+			VkClearColorValue clear_color = VkClearColorValue({ { color.x, color.y, color.z, color.w } });
+			vkCmdClearColorImage(cmd, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource_range);
+			TransitionLayout(old_layout, cmd);
+		}
+		else if(m_type == ImageType::Depth)
+		{
+			VkClearDepthStencilValue clear_depth_stencil = { 1.0f, 1 };
+			subresource_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+			TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd);
+			vkCmdClearDepthStencilImage(cmd, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_depth_stencil, 1, &subresource_range);
+		}
+	}
+
+	void Image::DestroySampler() noexcept
+	{
+		if(m_sampler != VK_NULL_HANDLE)
+			kvfDestroySampler(RenderCore::Get().GetDevice(), m_sampler);
+		m_sampler = VK_NULL_HANDLE;
+	}
+
+	void Image::DestroyImageView() noexcept
+	{
+		if(m_image_view != VK_NULL_HANDLE)
+			kvfDestroyImageView(RenderCore::Get().GetDevice(), m_image_view);
+		m_image_view = VK_NULL_HANDLE;
+	}
+
+	void Image::Destroy() noexcept
+	{
+		DestroySampler();
+		DestroyImageView();
+
+		if(m_image != VK_NULL_HANDLE)
+			RenderCore::Get().GetAllocator().DestroyImage(m_allocation, m_image);
+		m_image = VK_NULL_HANDLE;
+	}
+}
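TransitionLayout above is a no-op when the target layout already matches, and the null-handle convention decides who owns the command buffer: pass an open one to batch the barrier with other work, or VK_NULL_HANDLE to let kvf record and submit a throwaway single-time buffer. Usage sketch (assuming 'cmd' is a command buffer currently being recorded):

    image.TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd);                // recorded into 'cmd'
    image.TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_NULL_HANDLE); // single-time buffer, submitted internally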
diff --git a/runtime/Sources/Renderer/Images/Image.cpp b/runtime/Sources/Renderer/Images/Image.cpp
deleted file mode 100644
index d6e62ba..0000000
--- a/runtime/Sources/Renderer/Images/Image.cpp
+++ /dev/null
@@ -1,393 +0,0 @@
-#include
-
-#include
-#include
-
-namespace mlx
-{
-	bool IsStencilFormat(VkFormat format)
-	{
-		switch(format)
-		{
-			case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			case VK_FORMAT_D24_UNORM_S8_UINT:
-				return true;
-
-			default: return false;
-		}
-	}
-
-	bool IsDepthFormat(VkFormat format)
-	{
-		switch(format)
-		{
-			case VK_FORMAT_D16_UNORM:
-			case VK_FORMAT_D32_SFLOAT:
-			case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			case VK_FORMAT_D24_UNORM_S8_UINT:
-			case VK_FORMAT_D16_UNORM_S8_UINT:
-				return true;
-
-			default: return false;
-		}
-	}
-
-	VkFormat BitsToFormat(std::uint32_t bits)
-	{
-		switch(bits)
-		{
-			case 8: return VK_FORMAT_R8_UNORM;
-			case 16: return VK_FORMAT_R8G8_UNORM;
-			case 24: return VK_FORMAT_R8G8B8_UNORM;
-			case 32: return VK_FORMAT_R8G8B8A8_UNORM;
-			case 48: return VK_FORMAT_R16G16B16_SFLOAT;
-			case 64: return VK_FORMAT_R16G16B16A16_SFLOAT;
-			case 96: return VK_FORMAT_R32G32B32_SFLOAT;
-			case 128: return VK_FORMAT_R32G32B32A32_SFLOAT;
-
-			default:
-				FatalError("Vulkan : unsupported image bit-depth");
-				return VK_FORMAT_R8G8B8A8_UNORM;
-		}
-	}
-
-	VkAccessFlags LayoutToAccessMask(VkImageLayout layout, bool is_destination)
-	{
-		VkAccessFlags access_mask = 0;
-
-		switch(layout)
-		{
-			case VK_IMAGE_LAYOUT_UNDEFINED:
-				if(is_destination)
-					Error("Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_UNDEFINED");
-				break;
-			case VK_IMAGE_LAYOUT_GENERAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; break;
-			case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break;
-			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; break;
-			case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
-				access_mask = VK_ACCESS_SHADER_READ_BIT; // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
-				break;
-			case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; break;
-			case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_READ_BIT; break;
-			case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; break;
-			case VK_IMAGE_LAYOUT_PREINITIALIZED:
-				if(!is_destination)
-					access_mask = VK_ACCESS_HOST_WRITE_BIT;
-				else
-					Error("Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_PREINITIALIZED");
-				break;
-			case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break;
-			case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break;
-			case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: access_mask = VK_ACCESS_MEMORY_READ_BIT; break;
-
-			default: Error("Vulkan : unexpected image layout"); break;
-		}
-
-		return access_mask;
-	}
-
-	void Image::Create(std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, const char* name, bool dedicated_memory)
-	{
-		m_width = width;
-		m_height = height;
-		m_format = format;
-		m_tiling = tiling;
-
-		VkImageCreateInfo image_info{};
-		image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-		image_info.imageType = VK_IMAGE_TYPE_2D;
-		image_info.extent.width = width;
-		image_info.extent.height = height;
-		image_info.extent.depth = 1;
-		image_info.mipLevels = 1;
-		image_info.arrayLayers = 1;
-		image_info.format = format;
-		image_info.tiling = tiling;
-		image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-		image_info.usage = usage;
-		image_info.samples = VK_SAMPLE_COUNT_1_BIT;
-		image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-
-		VmaAllocationCreateInfo alloc_info{};
-		alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
-		if(dedicated_memory)
-		{
-			alloc_info.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-			alloc_info.priority = 1.0f;
-		}
-
-		m_allocation = RenderCore::Get().GetAllocator().CreateImage(&image_info, &alloc_info, m_image, name);
-		#ifdef DEBUG
-			m_name = name;
-		#endif
-	}
-
-	void Image::CreateImageView(VkImageViewType type, VkImageAspectFlags aspect_flags) noexcept
-	{
-		VkImageViewCreateInfo view_info{};
-		view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-		view_info.image = m_image;
-		view_info.viewType = type;
-		view_info.format = m_format;
-		view_info.subresourceRange.aspectMask = aspect_flags;
-		view_info.subresourceRange.baseMipLevel = 0;
-		view_info.subresourceRange.levelCount = 1;
-		view_info.subresourceRange.baseArrayLayer = 0;
-		view_info.subresourceRange.layerCount = 1;
-
-		VkResult res = vkCreateImageView(RenderCore::Get().GetDevice().Get(), &view_info, nullptr, &m_image_view);
-		if(res != VK_SUCCESS)
-			FatalError("Vulkan : failed to create an image view, %", VerbaliseVkResult(res));
-		#ifdef DEBUG
-		else
-			RenderCore::Get().GetLayers().SetDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_IMAGE_VIEW, (std::uint64_t)m_image_view, m_name.c_str());
-		#endif
-	}
-
-	void Image::CreateSampler() noexcept
-	{
-		VkSamplerCreateInfo info{};
-		info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
-		info.magFilter = VK_FILTER_NEAREST;
-		info.minFilter = VK_FILTER_NEAREST;
-		info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
-		info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-		info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-		info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-		info.minLod = -1000;
-		info.maxLod = 1000;
-		info.anisotropyEnable = VK_FALSE;
-		info.maxAnisotropy = 1.0f;
-
-		VkResult res = vkCreateSampler(RenderCore::Get().GetDevice().Get(), &info, nullptr, &m_sampler);
-		if(res != VK_SUCCESS)
-			FatalError("Vulkan : failed to create an image sampler, %", VerbaliseVkResult(res));
-		#ifdef DEBUG
-		else
-			RenderCore::Get().GetLayers().SetDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_SAMPLER, (std::uint64_t)m_sampler, m_name.c_str());
-		#endif
-	}
-
-	void Image::CopyFromBuffer(Buffer& buffer)
-	{
-		CommandBuffer& cmd = RenderCore::Get().GetSingleTimeCmdBuffer();
-		cmd.BeginRecord();
-
-		VkImageLayout layout_save = m_layout;
-		TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &cmd);
-
-		cmd.CopyBufferToImage(buffer, *this);
-
-		TransitionLayout(layout_save, &cmd);
-
-		cmd.EndRecord();
-		cmd.SubmitIdle();
-	}
-
-	void Image::CopyToBuffer(Buffer& buffer)
-	{
-		CommandBuffer& cmd = RenderCore::Get().GetSingleTimeCmdBuffer();
-		cmd.BeginRecord();
-
-		VkImageLayout layout_save = m_layout;
-		TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &cmd);
-
-		cmd.CopyImagetoBuffer(*this, buffer);
-
-		TransitionLayout(layout_save, &cmd);
-
-		cmd.EndRecord();
-		cmd.SubmitIdle();
-	}
-
-	void Image::TransitionLayout(VkImageLayout new_layout, NonOwningPtr<CommandBuffer> cmd)
-	{
-		if(new_layout == m_layout)
-			return;
-
-		bool single_time = (cmd == nullptr);
-		if(single_time)
-		{
-			cmd = &RenderCore::Get().GetSingleTimeCmdBuffer();
-			cmd->BeginRecord();
-		}
-
-		cmd->TransitionImageLayout(*this, new_layout);
-
-		if(single_time)
-		{
- 
cmd->EndRecord(); - cmd->SubmitIdle(); - } - m_layout = new_layout; - } - - void Image::DestroySampler() noexcept - { - if(m_sampler != VK_NULL_HANDLE) - vkDestroySampler(RenderCore::Get().GetDevice().Get(), m_sampler, nullptr); - m_sampler = VK_NULL_HANDLE; - } - - void Image::DestroyImageView() noexcept - { - if(m_image_view != VK_NULL_HANDLE) - vkDestroyImageView(RenderCore::Get().GetDevice().Get(), m_image_view, nullptr); - m_image_view = VK_NULL_HANDLE; - } - - void Image::Destroy() noexcept - { - DestroySampler(); - DestroyImageView(); - - if(m_image != VK_NULL_HANDLE) - RenderCore::Get().GetAllocator().DestroyImage(m_allocation, m_image); - m_image = VK_NULL_HANDLE; - } - - std::uint32_t FormatSize(VkFormat format) - { - switch(format) - { - case VK_FORMAT_UNDEFINED: return 0; - case VK_FORMAT_R4G4_UNORM_PACK8: return 1; - case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return 2; - case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return 2; - case VK_FORMAT_R5G6B5_UNORM_PACK16: return 2; - case VK_FORMAT_B5G6R5_UNORM_PACK16: return 2; - case VK_FORMAT_R5G5B5A1_UNORM_PACK16: return 2; - case VK_FORMAT_B5G5R5A1_UNORM_PACK16: return 2; - case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return 2; - case VK_FORMAT_R8_UNORM: return 1; - case VK_FORMAT_R8_SNORM: return 1; - case VK_FORMAT_R8_USCALED: return 1; - case VK_FORMAT_R8_SSCALED: return 1; - case VK_FORMAT_R8_UINT: return 1; - case VK_FORMAT_R8_SINT: return 1; - case VK_FORMAT_R8_SRGB: return 1; - case VK_FORMAT_R8G8_UNORM: return 2; - case VK_FORMAT_R8G8_SNORM: return 2; - case VK_FORMAT_R8G8_USCALED: return 2; - case VK_FORMAT_R8G8_SSCALED: return 2; - case VK_FORMAT_R8G8_UINT: return 2; - case VK_FORMAT_R8G8_SINT: return 2; - case VK_FORMAT_R8G8_SRGB: return 2; - case VK_FORMAT_R8G8B8_UNORM: return 3; - case VK_FORMAT_R8G8B8_SNORM: return 3; - case VK_FORMAT_R8G8B8_USCALED: return 3; - case VK_FORMAT_R8G8B8_SSCALED: return 3; - case VK_FORMAT_R8G8B8_UINT: return 3; - case VK_FORMAT_R8G8B8_SINT: return 3; - case VK_FORMAT_R8G8B8_SRGB: return 3; - case VK_FORMAT_B8G8R8_UNORM: return 3; - case VK_FORMAT_B8G8R8_SNORM: return 3; - case VK_FORMAT_B8G8R8_USCALED: return 3; - case VK_FORMAT_B8G8R8_SSCALED: return 3; - case VK_FORMAT_B8G8R8_UINT: return 3; - case VK_FORMAT_B8G8R8_SINT: return 3; - case VK_FORMAT_B8G8R8_SRGB: return 3; - case VK_FORMAT_R8G8B8A8_UNORM: return 4; - case VK_FORMAT_R8G8B8A8_SNORM: return 4; - case VK_FORMAT_R8G8B8A8_USCALED: return 4; - case VK_FORMAT_R8G8B8A8_SSCALED: return 4; - case VK_FORMAT_R8G8B8A8_UINT: return 4; - case VK_FORMAT_R8G8B8A8_SINT: return 4; - case VK_FORMAT_R8G8B8A8_SRGB: return 4; - case VK_FORMAT_B8G8R8A8_UNORM: return 4; - case VK_FORMAT_B8G8R8A8_SNORM: return 4; - case VK_FORMAT_B8G8R8A8_USCALED: return 4; - case VK_FORMAT_B8G8R8A8_SSCALED: return 4; - case VK_FORMAT_B8G8R8A8_UINT: return 4; - case VK_FORMAT_B8G8R8A8_SINT: return 4; - case VK_FORMAT_B8G8R8A8_SRGB: return 4; - case VK_FORMAT_A8B8G8R8_UNORM_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SNORM_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_USCALED_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SSCALED_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_UINT_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SINT_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_SNORM_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_USCALED_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_SSCALED_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_UINT_PACK32: return 4; - case 
VK_FORMAT_A2R10G10B10_SINT_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SNORM_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_USCALED_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SSCALED_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_UINT_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SINT_PACK32: return 4; - case VK_FORMAT_R16_UNORM: return 2; - case VK_FORMAT_R16_SNORM: return 2; - case VK_FORMAT_R16_USCALED: return 2; - case VK_FORMAT_R16_SSCALED: return 2; - case VK_FORMAT_R16_UINT: return 2; - case VK_FORMAT_R16_SINT: return 2; - case VK_FORMAT_R16_SFLOAT: return 2; - case VK_FORMAT_R16G16_UNORM: return 4; - case VK_FORMAT_R16G16_SNORM: return 4; - case VK_FORMAT_R16G16_USCALED: return 4; - case VK_FORMAT_R16G16_SSCALED: return 4; - case VK_FORMAT_R16G16_UINT: return 4; - case VK_FORMAT_R16G16_SINT: return 4; - case VK_FORMAT_R16G16_SFLOAT: return 4; - case VK_FORMAT_R16G16B16_UNORM: return 6; - case VK_FORMAT_R16G16B16_SNORM: return 6; - case VK_FORMAT_R16G16B16_USCALED: return 6; - case VK_FORMAT_R16G16B16_SSCALED: return 6; - case VK_FORMAT_R16G16B16_UINT: return 6; - case VK_FORMAT_R16G16B16_SINT: return 6; - case VK_FORMAT_R16G16B16_SFLOAT: return 6; - case VK_FORMAT_R16G16B16A16_UNORM: return 8; - case VK_FORMAT_R16G16B16A16_SNORM: return 8; - case VK_FORMAT_R16G16B16A16_USCALED: return 8; - case VK_FORMAT_R16G16B16A16_SSCALED: return 8; - case VK_FORMAT_R16G16B16A16_UINT: return 8; - case VK_FORMAT_R16G16B16A16_SINT: return 8; - case VK_FORMAT_R16G16B16A16_SFLOAT: return 8; - case VK_FORMAT_R32_UINT: return 4; - case VK_FORMAT_R32_SINT: return 4; - case VK_FORMAT_R32_SFLOAT: return 4; - case VK_FORMAT_R32G32_UINT: return 8; - case VK_FORMAT_R32G32_SINT: return 8; - case VK_FORMAT_R32G32_SFLOAT: return 8; - case VK_FORMAT_R32G32B32_UINT: return 12; - case VK_FORMAT_R32G32B32_SINT: return 12; - case VK_FORMAT_R32G32B32_SFLOAT: return 12; - case VK_FORMAT_R32G32B32A32_UINT: return 16; - case VK_FORMAT_R32G32B32A32_SINT: return 16; - case VK_FORMAT_R32G32B32A32_SFLOAT: return 16; - case VK_FORMAT_R64_UINT: return 8; - case VK_FORMAT_R64_SINT: return 8; - case VK_FORMAT_R64_SFLOAT: return 8; - case VK_FORMAT_R64G64_UINT: return 16; - case VK_FORMAT_R64G64_SINT: return 16; - case VK_FORMAT_R64G64_SFLOAT: return 16; - case VK_FORMAT_R64G64B64_UINT: return 24; - case VK_FORMAT_R64G64B64_SINT: return 24; - case VK_FORMAT_R64G64B64_SFLOAT: return 24; - case VK_FORMAT_R64G64B64A64_UINT: return 32; - case VK_FORMAT_R64G64B64A64_SINT: return 32; - case VK_FORMAT_R64G64B64A64_SFLOAT: return 32; - case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4; - case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4; - - default: return 0; - } - } -} diff --git a/runtime/Sources/Renderer/Images/Texture.cpp b/runtime/Sources/Renderer/Images/Texture.cpp deleted file mode 100644 index 18090f8..0000000 --- a/runtime/Sources/Renderer/Images/Texture.cpp +++ /dev/null @@ -1,190 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Texture.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 18:03:35 by maldavid #+# #+# */ -/* Updated: 2024/04/23 21:52:23 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#define STB_IMAGE_IMPLEMENTATION -#include - -#include - -#include -#include -#include - -#ifdef IMAGE_OPTIMIZED - #define TILING VK_IMAGE_TILING_OPTIMAL -#else - 
#define TILING VK_IMAGE_TILING_LINEAR -#endif - -namespace mlx -{ - void Texture::Create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory) - { - MLX_PROFILE_FUNCTION(); - Image::Create(width, height, format, TILING, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, name, dedicated_memory); - Image::CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - Image::CreateSampler(); - TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - - std::vector<Vertex> vertex_data = { - {{0, 0}, {1.f, 1.f, 1.f, 1.f}, {0.0f, 0.0f}}, - {{width, 0}, {1.f, 1.f, 1.f, 1.f}, {1.0f, 0.0f}}, - {{width, height}, {1.f, 1.f, 1.f, 1.f}, {1.0f, 1.0f}}, - {{0, height}, {1.f, 1.f, 1.f, 1.f}, {0.0f, 1.0f}} - }; - - std::vector<std::uint16_t> index_data = { 0, 1, 2, 2, 3, 0 }; - - #ifdef DEBUG - m_vbo.Create(sizeof(Vertex) * vertex_data.size(), vertex_data.data(), name); - m_ibo.Create(sizeof(std::uint16_t) * index_data.size(), index_data.data(), name); - m_name = name; - #else - m_vbo.Create(sizeof(Vertex) * vertex_data.size(), vertex_data.data(), nullptr); - m_ibo.Create(sizeof(std::uint16_t) * index_data.size(), index_data.data(), nullptr); - #endif - - Buffer staging_buffer; - std::size_t size = width * height * FormatSize(format); - if(pixels != nullptr) - { - #ifdef DEBUG - staging_buffer.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, pixels); - #else - staging_buffer.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, pixels); - #endif - } - else - { - std::vector<std::uint32_t> default_pixels(width * height, 0x00000000); - #ifdef DEBUG - staging_buffer.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, default_pixels.data()); - #else - staging_buffer.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, default_pixels.data()); - #endif - } - Image::CopyFromBuffer(staging_buffer); - staging_buffer.Destroy(); - } - - void Texture::SetPixel(int x, int y, std::uint32_t color) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 || y < 0 || static_cast<std::uint32_t>(x) >= GetWidth() || static_cast<std::uint32_t>(y) >= GetHeight()) - return; - if(m_map == nullptr) - OpenCPUmap(); - m_cpu_map[(y * GetWidth()) + x] = color; - m_has_been_modified = true; - } - - int Texture::GetPixel(int x, int y) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 || y < 0 || static_cast<std::uint32_t>(x) >= GetWidth() || static_cast<std::uint32_t>(y) >= GetHeight()) - return 0; - if(m_map == nullptr) - OpenCPUmap(); - std::uint32_t color = m_cpu_map[(y * GetWidth()) + x]; - std::uint8_t* bytes = reinterpret_cast<std::uint8_t*>(&color); - std::uint8_t tmp = bytes[0]; - bytes[0] = bytes[2]; - bytes[2] = tmp; - return *reinterpret_cast<int*>(bytes); - } - - void Texture::OpenCPUmap() - { - MLX_PROFILE_FUNCTION(); - if(m_map != nullptr) - return; - - DebugLog("Texture : enabling CPU mapping"); - std::size_t size = GetWidth() * GetHeight() * FormatSize(GetFormat()); - m_buf_map.emplace(); - #ifdef DEBUG - m_buf_map->Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, m_name.c_str()); - #else - m_buf_map->Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, nullptr); - #endif - Image::CopyToBuffer(*m_buf_map); - m_buf_map->MapMem(&m_map); - m_cpu_map = std::vector<std::uint32_t>(GetWidth() * GetHeight(), 0); - std::memcpy(m_cpu_map.data(), m_map, size); - DebugLog("Texture : mapped CPU memory using staging buffer"); - }
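For reference, the byte swap in GetPixel above turns the texture's little-endian R8G8B8A8 memory layout (which packs into an integer as 0xAABBGGRR) into a 0xAARRGGBB value, the ARGB packing the rest of the MLX colour API appears to expect. A standalone sketch of the same swizzle in plain C++ (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>
    #include <utility>

    // Swap the R and B bytes of one packed 4-byte pixel, as Texture::GetPixel does;
    // std::memcpy is used instead of reinterpret_cast to stay clear of aliasing UB.
    inline std::uint32_t SwapRedBlue(std::uint32_t color) noexcept
    {
        std::uint8_t bytes[4];
        std::memcpy(bytes, &color, sizeof(bytes));
        std::swap(bytes[0], bytes[2]);
        std::memcpy(&color, bytes, sizeof(bytes));
        return color;
    }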
memory using staging buffer"); - } - - void Texture::Render(Renderer& renderer, int x, int y) - { - MLX_PROFILE_FUNCTION(); - if(m_has_been_modified) - { - std::memcpy(m_map, m_cpu_map.data(), m_cpu_map.size() * FormatSize(GetFormat())); - Image::copyFromBuffer(*m_buf_map); - m_has_been_modified = false; - } - if(!m_set.IsInit()) - m_set = renderer.GetFragDescriptorSet().Duplicate(); - if(GetLayout() != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) - TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - if(!m_has_set_been_updated) - UpdateSet(0); - auto cmd = renderer.GetActiveCmdBuffer(); - m_vbo.bind(renderer); - m_ibo.bind(renderer); - glm::vec2 translate(x, y); - vkCmdPushConstants(cmd.Get(), renderer.GetPipeline().GetPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(translate), &translate); - m_set.Bind(); - vkCmdDrawIndexed(cmd.Get(), static_cast(m_ibo.GetSize() / sizeof(std::uint16_t)), 1, 0, 0, 0); - } - - void Texture::Destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - Image::Destroy(); - m_set.Destroy(); - if(m_buf_map.has_value()) - m_buf_map->Destroy(); - m_vbo.destroy(); - m_ibo.destroy(); - } - - Texture stbTextureLoad(std::filesystem::path file, int* w, int* h) - { - MLX_PROFILE_FUNCTION(); - Texture* texture = new Texture; - int channels; - std::uint8_t* data = nullptr; - std::string filename = file.string(); - - if(!std::filesystem::exists(std::move(file))) - { - Error("Image : file not found '%s'", filename.c_str()); - return nullptr; - } - if(stbi_is_hdr(filename.c_str())) - { - Error("Texture : unsupported image format '%s'", filename.c_str()); - return nullptr; - } - int dummy_w; - int dummy_h; - data = stbi_load(filename.c_str(), (w == nullptr ? &dummy_w : w), (h == nullptr ? &dummy_h : h), &channels, 4); - #ifdef DEBUG - texture->Create(data, (w == nullptr ? dummy_w : *w), (h == nullptr ? dummy_h : *h), VK_FORMAT_R8G8B8A8_UNORM, filename.c_str()); - #else - texture->Create(data, (w == nullptr ? dummy_w : *w), (h == nullptr ? dummy_h : *h), VK_FORMAT_R8G8B8A8_UNORM, nullptr); - #endif - stbi_image_free(data); - return texture; - } -} diff --git a/runtime/Sources/Renderer/Images/TextureAtlas.cpp b/runtime/Sources/Renderer/Images/TextureAtlas.cpp deleted file mode 100644 index 04bf46f..0000000 --- a/runtime/Sources/Renderer/Images/TextureAtlas.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextureAtlas.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/07 16:40:09 by maldavid #+# #+# */ -/* Updated: 2024/04/23 21:54:05 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -#ifdef IMAGE_OPTIMIZED - #define TILING VK_IMAGE_TILING_OPTIMAL -#else - #define TILING VK_IMAGE_TILING_LINEAR -#endif - -namespace mlx -{ - void TextureAtlas::Create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory) - { - Image::Create(width, height, format, TILING, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, name, dedicated_memory); - Image::CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - Image::CreateSampler(); - TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - - if(pixels == nullptr) - { - Warning("Renderer : creating an empty texture atlas. 
They cannot be updated after creation, this might be a mistake or a bug, please report"); - return; - } - Buffer staging_buffer; - std::size_t size = width * height * FormatSize(format); - staging_buffer.Create(BufferType::HighDynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, pixels); - Image::CopyFromBuffer(staging_buffer); - staging_buffer.Destroy(); - } - - void TextureAtlas::Render(Renderer& renderer, int x, int y, std::uint32_t ibo_size) const - { - auto cmd = renderer.GetActiveCmdBuffer().Get(); - - glm::vec2 translate(x, y); - vkCmdPushConstants(cmd, renderer.GetPipeline().GetPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(translate), &translate); - vkCmdDrawIndexed(cmd, ibo_size / sizeof(std::uint16_t), 1, 0, 0, 0); - } - - void TextureAtlas::Destroy() noexcept - { - Image::Destroy(); - m_set.Destroy(); - } -} diff --git a/runtime/Sources/Renderer/Memory.cpp b/runtime/Sources/Renderer/Memory.cpp new file mode 100644 index 0000000..4b22069 --- /dev/null +++ b/runtime/Sources/Renderer/Memory.cpp @@ -0,0 +1,159 @@ +#include + +#define VMA_STATIC_VULKAN_FUNCTIONS 0 +#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0 +#define VMA_VULKAN_VERSION 1000000 +#define VMA_ASSERT(expr) ((void)0) +#define VMA_IMPLEMENTATION + +#ifdef MLX_COMPILER_CLANG + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Weverything" + #include + #pragma clang diagnostic pop +#elif defined(MLX_COMPILER_GCC) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" + #pragma GCC diagnostic ignored "-Wmissing-field-initializers" + #pragma GCC diagnostic ignored "-Wunused-parameter" + #pragma GCC diagnostic ignored "-Wunused-variable" + #pragma GCC diagnostic ignored "-Wparentheses" + #include + #pragma GCC diagnostic pop +#else + #include +#endif + +#include + +namespace mlx +{ + void GPUAllocator::Init() noexcept + { + VmaVulkanFunctions vma_vulkan_func{}; + vma_vulkan_func.vkAllocateMemory = vkAllocateMemory; + vma_vulkan_func.vkBindBufferMemory = vkBindBufferMemory; + vma_vulkan_func.vkBindImageMemory = vkBindImageMemory; + vma_vulkan_func.vkCreateBuffer = vkCreateBuffer; + vma_vulkan_func.vkCreateImage = vkCreateImage; + vma_vulkan_func.vkDestroyBuffer = vkDestroyBuffer; + vma_vulkan_func.vkDestroyImage = vkDestroyImage; + vma_vulkan_func.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges; + vma_vulkan_func.vkFreeMemory = vkFreeMemory; + vma_vulkan_func.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements; + vma_vulkan_func.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements; + vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties; + vma_vulkan_func.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties; + vma_vulkan_func.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges; + vma_vulkan_func.vkMapMemory = vkMapMemory; + vma_vulkan_func.vkUnmapMemory = vkUnmapMemory; + vma_vulkan_func.vkCmdCopyBuffer = vkCmdCopyBuffer; + + VmaAllocatorCreateInfo allocator_create_info{}; + allocator_create_info.vulkanApiVersion = VK_API_VERSION_1_0; + allocator_create_info.physicalDevice = RenderCore::Get().GetPhysicalDevice(); + allocator_create_info.device = RenderCore::Get().GetDevice(); + allocator_create_info.instance = RenderCore::Get().GetInstance(); + allocator_create_info.pVulkanFunctions = &vma_vulkan_func; + + kvfCheckVk(vmaCreateAllocator(&allocator_create_info, &m_allocator)); + DebugLog("Graphics allocator : created new allocator"); + } + + VmaAllocation GPUAllocator::CreateBuffer(const 
VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + VmaAllocation allocation; + kvfCheckVk(vmaCreateBuffer(m_allocator, binfo, vinfo, &buffer, &allocation, nullptr)); + if(name != nullptr) + { + vmaSetAllocationName(m_allocator, allocation, name); + } + DebugLog("Graphics Allocator : created new buffer '%'", name); + m_active_buffers_allocations++; + return allocation; + } + + void GPUAllocator::DestroyBuffer(VmaAllocation allocation, VkBuffer buffer) noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + vmaDestroyBuffer(m_allocator, buffer, allocation); + DebugLog("Graphics Allocator : destroyed buffer"); + m_active_buffers_allocations--; + } + + VmaAllocation GPUAllocator::CreateImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + VmaAllocation allocation; + kvfCheckVk(vmaCreateImage(m_allocator, iminfo, vinfo, &image, &allocation, nullptr)); + if(name != nullptr) + { + vmaSetAllocationName(m_allocator, allocation, name); + } + DebugLog("Graphics Allocator : created new image '%'", name); + m_active_images_allocations++; + return allocation; + } + + void GPUAllocator::DestroyImage(VmaAllocation allocation, VkImage image) noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + vmaDestroyImage(m_allocator, image, allocation); + DebugLog("Graphics Allocator : destroyed image"); + m_active_images_allocations--; + } + + void GPUAllocator::MapMemory(VmaAllocation allocation, void** data) noexcept + { + MLX_PROFILE_FUNCTION(); + kvfCheckVk(vmaMapMemory(m_allocator, allocation, data)); + } + + void GPUAllocator::UnmapMemory(VmaAllocation allocation) noexcept + { + MLX_PROFILE_FUNCTION(); + vmaUnmapMemory(m_allocator, allocation); + } + + void GPUAllocator::DumpMemoryToJson() + { + static std::uint32_t id = 0; + std::string name("memory_dump"); + name.append(std::to_string(id) + ".json"); + std::ofstream file(name); + if(!file.is_open()) + { + Error("Graphics allocator : unable to dump memory to a json file"); + return; + } + char* str = nullptr; + vmaBuildStatsString(m_allocator, &str, true); + file << str; + vmaFreeStatsString(m_allocator, str); + file.close(); + id++; + } + + void GPUAllocator::Flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept + { + MLX_PROFILE_FUNCTION(); + vmaFlushAllocation(m_allocator, allocation, offset, size); + }
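The MapMemory/Flush/UnmapMemory trio above is what higher-level code uses to stream pixel data into host-visible allocations. A minimal sketch of that round trip (the helper and its arguments are illustrative, and it assumes the allocation was created host-visible):

    #include <cstring>

    // Hypothetical upload helper built on the GPUAllocator methods above.
    void UploadBytes(GPUAllocator& allocator, VmaAllocation allocation, const void* src, VkDeviceSize size)
    {
        void* mapped = nullptr;
        allocator.MapMemory(allocation, &mapped);
        std::memcpy(mapped, src, static_cast<std::size_t>(size));
        allocator.Flush(allocation, size, 0); // a no-op on HOST_COHERENT memory, required otherwise
        allocator.UnmapMemory(allocation);
    }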
+ + void GPUAllocator::Destroy() noexcept + { + if(m_active_images_allocations != 0) + Error("Graphics allocator : some user-dependent allocations were not freed before destroying the display (% active allocations). You may not have destroyed all the MLX resources you've created", m_active_images_allocations); + else if(m_active_buffers_allocations != 0) + Error("Graphics allocator : some MLX-dependent allocations were not freed before destroying the display (% active allocations). This is an error in the MLX, please report it; this should not happen", m_active_buffers_allocations); + if(m_active_images_allocations < 0 || m_active_buffers_allocations < 0) + Warning("Graphics allocator : the impossible happened, the MLX has freed more allocations than it has made (wtf)"); + vmaDestroyAllocator(m_allocator); + m_active_buffers_allocations = 0; + m_active_images_allocations = 0; + DebugLog("Vulkan : destroyed a graphics allocator"); + } +} diff --git a/runtime/Sources/Renderer/Pipelines/Graphics.cpp b/runtime/Sources/Renderer/Pipelines/Graphics.cpp new file mode 100644 index 0000000..81bee86 --- /dev/null +++ b/runtime/Sources/Renderer/Pipelines/Graphics.cpp @@ -0,0 +1,164 @@ +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + void GraphicPipeline::Init(const GraphicPipelineDescriptor& descriptor) + { + if(!descriptor.vertex_shader || !descriptor.fragment_shader) + FatalError("Vulkan : invalid shaders"); + + m_attachments = descriptor.color_attachments; + p_vertex_shader = descriptor.vertex_shader; + p_fragment_shader = descriptor.fragment_shader; + p_renderer = descriptor.renderer; + + std::vector<VkPushConstantRange> push_constants; + std::vector<VkDescriptorSetLayout> set_layouts; + push_constants.insert(push_constants.end(), p_vertex_shader->GetPipelineLayout().push_constants.begin(), p_vertex_shader->GetPipelineLayout().push_constants.end()); + push_constants.insert(push_constants.end(), p_fragment_shader->GetPipelineLayout().push_constants.begin(), p_fragment_shader->GetPipelineLayout().push_constants.end()); + set_layouts.insert(set_layouts.end(), p_vertex_shader->GetPipelineLayout().set_layouts.begin(), p_vertex_shader->GetPipelineLayout().set_layouts.end()); + set_layouts.insert(set_layouts.end(), p_fragment_shader->GetPipelineLayout().set_layouts.begin(), p_fragment_shader->GetPipelineLayout().set_layouts.end()); + m_pipeline_layout = kvfCreatePipelineLayout(RenderCore::Get().GetDevice(), set_layouts.data(), set_layouts.size(), push_constants.data(), push_constants.size()); + + TransitionAttachments(); + CreateFramebuffers(m_attachments, descriptor.clear_color_attachments); + + VkPhysicalDeviceFeatures features{}; + vkGetPhysicalDeviceFeatures(RenderCore::Get().GetPhysicalDevice(), &features); + + KvfGraphicsPipelineBuilder* builder = kvfCreateGPipelineBuilder(); + kvfGPipelineBuilderAddShaderStage(builder, p_vertex_shader->GetShaderStage(), p_vertex_shader->GetShaderModule(), "main"); + kvfGPipelineBuilderAddShaderStage(builder, p_fragment_shader->GetShaderStage(), p_fragment_shader->GetShaderModule(), "main"); + kvfGPipelineBuilderSetInputTopology(builder, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); + kvfGPipelineBuilderSetCullMode(builder, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE); + kvfGPipelineBuilderEnableAlphaBlending(builder); + kvfGPipelineBuilderDisableDepthTest(builder); + kvfGPipelineBuilderSetPolygonMode(builder, VK_POLYGON_MODE_FILL, 1.0f); + if(features.sampleRateShading) + kvfGPipelineBuilderSetMultisamplingShading(builder, VK_SAMPLE_COUNT_1_BIT, 0.25f); + else + kvfGPipelineBuilderSetMultisampling(builder, VK_SAMPLE_COUNT_1_BIT); + + if(!descriptor.no_vertex_inputs) + { + VkVertexInputBindingDescription binding_description = Vertex::GetBindingDescription(); + auto attributes_description = Vertex::GetAttributeDescriptions(); + kvfGPipelineBuilderSetVertexInputs(builder, binding_description, attributes_description.data(), attributes_description.size()); + } + + m_pipeline = kvfCreateGraphicsPipeline(RenderCore::Get().GetDevice(), m_pipeline_layout, builder, m_renderpass); + DebugLog("Vulkan : graphics pipeline created"); + kvfDestroyGPipelineBuilder(builder); + }
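For context, everything the pipeline needs is carried by the descriptor consumed by Init() above. A minimal sketch of a caller, assuming target is a color-attachment Image and both shaders were created beforehand (all names here are illustrative):

    GraphicPipelineDescriptor descriptor;
    descriptor.vertex_shader = vertex_shader;     // std::shared_ptr<Shader>
    descriptor.fragment_shader = fragment_shader; // std::shared_ptr<Shader>
    descriptor.color_attachments = { &target };   // offscreen target; set `renderer` instead to draw to the swapchain
    descriptor.clear_color_attachments = false;   // keep previous contents, as the 2D pass below does
    pipeline.Init(descriptor);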
+ + bool GraphicPipeline::BindPipeline(VkCommandBuffer command_buffer, std::size_t framebuffer_index, std::array<float, 4> clear) noexcept + { + TransitionAttachments(command_buffer); + VkFramebuffer fb = m_framebuffers[framebuffer_index]; + VkExtent2D fb_extent = kvfGetFramebufferSize(fb); + + VkViewport viewport{}; + viewport.x = 0.0f; + viewport.y = 0.0f; + viewport.width = fb_extent.width; + viewport.height = fb_extent.height; + viewport.minDepth = 0.0f; + viewport.maxDepth = 1.0f; + vkCmdSetViewport(command_buffer, 0, 1, &viewport); + + VkRect2D scissor{}; + scissor.offset = { 0, 0 }; + scissor.extent = fb_extent; + vkCmdSetScissor(command_buffer, 0, 1, &scissor); + + for(std::size_t i = 0; i < m_clears.size(); i++) + { + m_clears[i].color.float32[0] = clear[0]; + m_clears[i].color.float32[1] = clear[1]; + m_clears[i].color.float32[2] = clear[2]; + m_clears[i].color.float32[3] = clear[3]; + } + + kvfBeginRenderPass(m_renderpass, command_buffer, fb, fb_extent, m_clears.data(), m_clears.size()); + vkCmdBindPipeline(command_buffer, GetPipelineBindPoint(), GetPipeline()); + return true; + } + + void GraphicPipeline::EndPipeline(VkCommandBuffer command_buffer) noexcept + { + vkCmdEndRenderPass(command_buffer); + } + + void GraphicPipeline::Destroy() noexcept + { + p_vertex_shader.reset(); + p_fragment_shader.reset(); + for(auto& fb : m_framebuffers) + { + kvfDestroyFramebuffer(RenderCore::Get().GetDevice(), fb); + DebugLog("Vulkan : framebuffer destroyed"); + } + m_framebuffers.clear(); + kvfDestroyPipelineLayout(RenderCore::Get().GetDevice(), m_pipeline_layout); + m_pipeline_layout = VK_NULL_HANDLE; + DebugLog("Vulkan : graphics pipeline layout destroyed"); + kvfDestroyRenderPass(RenderCore::Get().GetDevice(), m_renderpass); + m_renderpass = VK_NULL_HANDLE; + DebugLog("Vulkan : renderpass destroyed"); + kvfDestroyPipeline(RenderCore::Get().GetDevice(), m_pipeline); + m_pipeline = VK_NULL_HANDLE; + DebugLog("Vulkan : graphics pipeline destroyed"); + } + + void GraphicPipeline::CreateFramebuffers(const std::vector<NonOwningPtr<Image>>& render_targets, bool clear_attachments) + { + std::vector<VkAttachmentDescription> attachments; + std::vector<VkImageView> attachment_views; + if(p_renderer) + { + attachments.push_back(kvfBuildSwapchainAttachmentDescription(p_renderer->GetSwapchain(), clear_attachments)); + attachment_views.push_back(p_renderer->GetSwapchainImages()[0].GetImageView()); + } + + for(NonOwningPtr<Image> image : render_targets) + { + attachments.push_back(kvfBuildAttachmentDescription(KVF_IMAGE_COLOR, image->GetFormat(), image->GetLayout(), image->GetLayout(), clear_attachments, VK_SAMPLE_COUNT_1_BIT)); + attachment_views.push_back(image->GetImageView()); + } + + m_renderpass = kvfCreateRenderPass(RenderCore::Get().GetDevice(), attachments.data(), attachments.size(), GetPipelineBindPoint()); + m_clears.clear(); + m_clears.resize(attachments.size()); + DebugLog("Vulkan : renderpass created"); + + if(p_renderer) + { + for(const Image& image : p_renderer->GetSwapchainImages()) + { + attachment_views[0] = image.GetImageView(); + m_framebuffers.push_back(kvfCreateFramebuffer(RenderCore::Get().GetDevice(), m_renderpass, attachment_views.data(), attachment_views.size(), { .width = image.GetWidth(), .height = image.GetHeight() })); + DebugLog("Vulkan : framebuffer created"); + } + } + for(NonOwningPtr<Image> image : render_targets) + { + m_framebuffers.push_back(kvfCreateFramebuffer(RenderCore::Get().GetDevice(), m_renderpass, attachment_views.data(), 
attachment_views.size(), { .width = image->GetWidth(), .height = image->GetHeight() })); + DebugLog("Vulkan : framebuffer created"); + } + } + + void GraphicPipeline::TransitionAttachments(VkCommandBuffer cmd) + { + for(NonOwningPtr image : m_attachments) + { + if(!image->IsInit()) + continue; + image->TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, cmd); + } + } +} diff --git a/runtime/Sources/Renderer/Pipelines/Pipeline.cpp b/runtime/Sources/Renderer/Pipelines/Pipeline.cpp deleted file mode 100644 index cd194fe..0000000 --- a/runtime/Sources/Renderer/Pipelines/Pipeline.cpp +++ /dev/null @@ -1,331 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Pipeline.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 21:27:38 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:24:13 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include -#include - -namespace mlx -{ - /** - #version 450 core - - layout(location = 0) in vec2 aPos; - layout(location = 1) in vec4 aColor; - layout(location = 2) in vec2 aUV; - - layout(set = 0, binding = 0) uniform uProjection - { - mat4 mat; - } uProj; - - layout(push_constant) uniform uModelPushConstant - { - vec2 vec; - } uTranslate; - - out gl_PerVertex - { - vec4 gl_Position; - }; - - layout(location = 0) out struct - { - vec4 Color; - vec2 UV; - } Out; - - void main() - { - Out.Color = aColor; - Out.UV = aUV; - vec2 pos = aPos + uTranslate.vec; - gl_Position = uProj.mat * vec4(pos.x, pos.y, 0.0, 1.0); - } - */ - const std::vector vertex_shader = { // precompiled vertex shader - 0x07230203,0x00010000,0x0008000b,0x0000003b,0x00000000,0x00020011,0x00000001,0x0006000b, - 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, - 0x000a000f,0x00000000,0x00000004,0x6e69616d,0x00000000,0x0000000b,0x0000000f,0x00000015, - 0x0000001b,0x00000026,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, - 0x00000000,0x00030005,0x00000009,0x00000000,0x00050006,0x00000009,0x00000000,0x6f6c6f43, - 0x00000072,0x00040006,0x00000009,0x00000001,0x00005655,0x00030005,0x0000000b,0x0074754f, - 0x00040005,0x0000000f,0x6c6f4361,0x0000726f,0x00030005,0x00000015,0x00565561,0x00030005, - 0x0000001a,0x00736f70,0x00040005,0x0000001b,0x736f5061,0x00000000,0x00070005,0x0000001d, - 0x646f4d75,0x75506c65,0x6f436873,0x6174736e,0x0000746e,0x00040006,0x0000001d,0x00000000, - 0x00636576,0x00050005,0x0000001f,0x61725475,0x616c736e,0x00006574,0x00060005,0x00000024, - 0x505f6c67,0x65567265,0x78657472,0x00000000,0x00060006,0x00000024,0x00000000,0x505f6c67, - 0x7469736f,0x006e6f69,0x00030005,0x00000026,0x00000000,0x00050005,0x00000028,0x6f725075, - 0x7463656a,0x006e6f69,0x00040006,0x00000028,0x00000000,0x0074616d,0x00040005,0x0000002a, - 0x6f725075,0x0000006a,0x00040047,0x0000000b,0x0000001e,0x00000000,0x00040047,0x0000000f, - 0x0000001e,0x00000001,0x00040047,0x00000015,0x0000001e,0x00000002,0x00040047,0x0000001b, - 0x0000001e,0x00000000,0x00050048,0x0000001d,0x00000000,0x00000023,0x00000000,0x00030047, - 0x0000001d,0x00000002,0x00050048,0x00000024,0x00000000,0x0000000b,0x00000000,0x00030047, - 0x00000024,0x00000002,0x00040048,0x00000028,0x00000000,0x00000005,0x00050048,0x00000028, - 0x00000000,0x00000023,0x00000000,0x00050048,0x00000028,0x00000000,0x00000007,0x00000010, - 
0x00030047,0x00000028,0x00000002,0x00040047,0x0000002a,0x00000022,0x00000000,0x00040047, - 0x0000002a,0x00000021,0x00000000,0x00020013,0x00000002,0x00030021,0x00000003,0x00000002, - 0x00030016,0x00000006,0x00000020,0x00040017,0x00000007,0x00000006,0x00000004,0x00040017, - 0x00000008,0x00000006,0x00000002,0x0004001e,0x00000009,0x00000007,0x00000008,0x00040020, - 0x0000000a,0x00000003,0x00000009,0x0004003b,0x0000000a,0x0000000b,0x00000003,0x00040015, - 0x0000000c,0x00000020,0x00000001,0x0004002b,0x0000000c,0x0000000d,0x00000000,0x00040020, - 0x0000000e,0x00000001,0x00000007,0x0004003b,0x0000000e,0x0000000f,0x00000001,0x00040020, - 0x00000011,0x00000003,0x00000007,0x0004002b,0x0000000c,0x00000013,0x00000001,0x00040020, - 0x00000014,0x00000001,0x00000008,0x0004003b,0x00000014,0x00000015,0x00000001,0x00040020, - 0x00000017,0x00000003,0x00000008,0x00040020,0x00000019,0x00000007,0x00000008,0x0004003b, - 0x00000014,0x0000001b,0x00000001,0x0003001e,0x0000001d,0x00000008,0x00040020,0x0000001e, - 0x00000009,0x0000001d,0x0004003b,0x0000001e,0x0000001f,0x00000009,0x00040020,0x00000020, - 0x00000009,0x00000008,0x0003001e,0x00000024,0x00000007,0x00040020,0x00000025,0x00000003, - 0x00000024,0x0004003b,0x00000025,0x00000026,0x00000003,0x00040018,0x00000027,0x00000007, - 0x00000004,0x0003001e,0x00000028,0x00000027,0x00040020,0x00000029,0x00000002,0x00000028, - 0x0004003b,0x00000029,0x0000002a,0x00000002,0x00040020,0x0000002b,0x00000002,0x00000027, - 0x00040015,0x0000002e,0x00000020,0x00000000,0x0004002b,0x0000002e,0x0000002f,0x00000000, - 0x00040020,0x00000030,0x00000007,0x00000006,0x0004002b,0x0000002e,0x00000033,0x00000001, - 0x0004002b,0x00000006,0x00000036,0x00000000,0x0004002b,0x00000006,0x00000037,0x3f800000, - 0x00050036,0x00000002,0x00000004,0x00000000,0x00000003,0x000200f8,0x00000005,0x0004003b, - 0x00000019,0x0000001a,0x00000007,0x0004003d,0x00000007,0x00000010,0x0000000f,0x00050041, - 0x00000011,0x00000012,0x0000000b,0x0000000d,0x0003003e,0x00000012,0x00000010,0x0004003d, - 0x00000008,0x00000016,0x00000015,0x00050041,0x00000017,0x00000018,0x0000000b,0x00000013, - 0x0003003e,0x00000018,0x00000016,0x0004003d,0x00000008,0x0000001c,0x0000001b,0x00050041, - 0x00000020,0x00000021,0x0000001f,0x0000000d,0x0004003d,0x00000008,0x00000022,0x00000021, - 0x00050081,0x00000008,0x00000023,0x0000001c,0x00000022,0x0003003e,0x0000001a,0x00000023, - 0x00050041,0x0000002b,0x0000002c,0x0000002a,0x0000000d,0x0004003d,0x00000027,0x0000002d, - 0x0000002c,0x00050041,0x00000030,0x00000031,0x0000001a,0x0000002f,0x0004003d,0x00000006, - 0x00000032,0x00000031,0x00050041,0x00000030,0x00000034,0x0000001a,0x00000033,0x0004003d, - 0x00000006,0x00000035,0x00000034,0x00070050,0x00000007,0x00000038,0x00000032,0x00000035, - 0x00000036,0x00000037,0x00050091,0x00000007,0x00000039,0x0000002d,0x00000038,0x00050041, - 0x00000011,0x0000003a,0x00000026,0x0000000d,0x0003003e,0x0000003a,0x00000039,0x000100fd, - 0x00010038 - }; - - /** - #version 450 core - - layout(location = 0) out vec4 fColor; - - layout(set = 1, binding = 0) uniform sampler2D sTexture; - - layout(location = 0) in struct - { - vec4 Color; - vec2 UV; - } In; - - void main() - { - vec4 process_color = In.Color * texture(sTexture, In.UV.st); - if(process_color.w == 0) - discard; - fColor = process_color; - } - */ - const std::vector fragment_shader = { // pre compiled fragment shader - 0x07230203,0x00010000,0x0008000b,0x0000002c,0x00000000,0x00020011,0x00000001,0x0006000b, - 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, - 
0x0007000f,0x00000004,0x00000004,0x6e69616d,0x00000000,0x0000000d,0x0000002a,0x00030010, - 0x00000004,0x00000007,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, - 0x00000000,0x00060005,0x00000009,0x636f7270,0x5f737365,0x6f6c6f63,0x00000072,0x00030005, - 0x0000000b,0x00000000,0x00050006,0x0000000b,0x00000000,0x6f6c6f43,0x00000072,0x00040006, - 0x0000000b,0x00000001,0x00005655,0x00030005,0x0000000d,0x00006e49,0x00050005,0x00000016, - 0x78655473,0x65727574,0x00000000,0x00040005,0x0000002a,0x6c6f4366,0x0000726f,0x00040047, - 0x0000000d,0x0000001e,0x00000000,0x00040047,0x00000016,0x00000022,0x00000001,0x00040047, - 0x00000016,0x00000021,0x00000000,0x00040047,0x0000002a,0x0000001e,0x00000000,0x00020013, - 0x00000002,0x00030021,0x00000003,0x00000002,0x00030016,0x00000006,0x00000020,0x00040017, - 0x00000007,0x00000006,0x00000004,0x00040020,0x00000008,0x00000007,0x00000007,0x00040017, - 0x0000000a,0x00000006,0x00000002,0x0004001e,0x0000000b,0x00000007,0x0000000a,0x00040020, - 0x0000000c,0x00000001,0x0000000b,0x0004003b,0x0000000c,0x0000000d,0x00000001,0x00040015, - 0x0000000e,0x00000020,0x00000001,0x0004002b,0x0000000e,0x0000000f,0x00000000,0x00040020, - 0x00000010,0x00000001,0x00000007,0x00090019,0x00000013,0x00000006,0x00000001,0x00000000, - 0x00000000,0x00000000,0x00000001,0x00000000,0x0003001b,0x00000014,0x00000013,0x00040020, - 0x00000015,0x00000000,0x00000014,0x0004003b,0x00000015,0x00000016,0x00000000,0x0004002b, - 0x0000000e,0x00000018,0x00000001,0x00040020,0x00000019,0x00000001,0x0000000a,0x00040015, - 0x0000001e,0x00000020,0x00000000,0x0004002b,0x0000001e,0x0000001f,0x00000003,0x00040020, - 0x00000020,0x00000007,0x00000006,0x0004002b,0x00000006,0x00000023,0x00000000,0x00020014, - 0x00000024,0x00040020,0x00000029,0x00000003,0x00000007,0x0004003b,0x00000029,0x0000002a, - 0x00000003,0x00050036,0x00000002,0x00000004,0x00000000,0x00000003,0x000200f8,0x00000005, - 0x0004003b,0x00000008,0x00000009,0x00000007,0x00050041,0x00000010,0x00000011,0x0000000d, - 0x0000000f,0x0004003d,0x00000007,0x00000012,0x00000011,0x0004003d,0x00000014,0x00000017, - 0x00000016,0x00050041,0x00000019,0x0000001a,0x0000000d,0x00000018,0x0004003d,0x0000000a, - 0x0000001b,0x0000001a,0x00050057,0x00000007,0x0000001c,0x00000017,0x0000001b,0x00050085, - 0x00000007,0x0000001d,0x00000012,0x0000001c,0x0003003e,0x00000009,0x0000001d,0x00050041, - 0x00000020,0x00000021,0x00000009,0x0000001f,0x0004003d,0x00000006,0x00000022,0x00000021, - 0x000500b4,0x00000024,0x00000025,0x00000022,0x00000023,0x000300f7,0x00000027,0x00000000, - 0x000400fa,0x00000025,0x00000026,0x00000027,0x000200f8,0x00000026,0x000100fc,0x000200f8, - 0x00000027,0x0004003d,0x00000007,0x0000002b,0x00000009,0x0003003e,0x0000002a,0x0000002b, - 0x000100fd,0x00010038 - }; - - void GraphicPipeline::Init(Renderer& renderer) - { - VkShaderModuleCreateInfo create_info{}; - create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; - create_info.codeSize = vertex_shader.size() * sizeof(std::uint32_t); - create_info.pCode = vertex_shader.data(); - VkShaderModule vshader; - if(vkCreateShaderModule(RenderCore::Get().GetDevice().Get(), &create_info, nullptr, &vshader) != VK_SUCCESS) - FatalError("Vulkan : failed to create a vertex shader module"); - - VkPushConstantRange push_constant; - push_constant.offset = 0; - push_constant.size = sizeof(glm::vec2); - push_constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; - - create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; - create_info.codeSize = fragment_shader.size() * sizeof(std::uint32_t); - 
create_info.pCode = fragment_shader.data(); - VkShaderModule fshader; - if(vkCreateShaderModule(RenderCore::Get().GetDevice().Get(), &create_info, nullptr, &fshader) != VK_SUCCESS) - FatalError("Vulkan : failed to create a fragment shader module"); - - VkPipelineShaderStageCreateInfo vert_shader_stage_info{}; - vert_shader_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; - vert_shader_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT; - vert_shader_stage_info.module = vshader; - vert_shader_stage_info.pName = "main"; - - VkPipelineShaderStageCreateInfo frag_shader_stage_info{}; - frag_shader_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; - frag_shader_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT; - frag_shader_stage_info.module = fshader; - frag_shader_stage_info.pName = "main"; - - std::array stages = { vert_shader_stage_info, frag_shader_stage_info }; - - auto binding_description = Vertex::GetBindingDescription(); - auto attribute_descriptions = Vertex::GetAttributeDescriptions(); - - VkPipelineVertexInputStateCreateInfo vertex_input_state_create_info{}; - vertex_input_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertex_input_state_create_info.vertexBindingDescriptionCount = 1; - vertex_input_state_create_info.pVertexBindingDescriptions = &binding_description; - vertex_input_state_create_info.vertexAttributeDescriptionCount = static_cast(attribute_descriptions.size()); - vertex_input_state_create_info.pVertexAttributeDescriptions = attribute_descriptions.data(); - - VkPipelineInputAssemblyStateCreateInfo input_assembly{}; - input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; - input_assembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; - input_assembly.primitiveRestartEnable = VK_FALSE; - - VkDynamicState states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR }; - - constexpr std::size_t states_count = sizeof(states) / sizeof(VkDynamicState); - VkPipelineDynamicStateCreateInfo dynamic_states{}; - dynamic_states.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; - dynamic_states.dynamicStateCount = states_count; - dynamic_states.pDynamicStates = states; - - VkViewport viewport{}; - viewport.x = 0.0f; - viewport.y = 0.0f; - viewport.width = (float)renderer.GetFrameBuffer(0).GetWidth(); - viewport.height = (float)renderer.GetFrameBuffer(0).GetHeight(); - viewport.minDepth = 0.0f; - viewport.maxDepth = 1.0f; - - VkRect2D scissor{}; - scissor.offset = { 0, 0 }; - scissor.extent = { renderer.GetFrameBuffer(0).GetWidth(), renderer.GetFrameBuffer(0).GetHeight()}; - - VkPipelineViewportStateCreateInfo viewport_state{}; - viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; - viewport_state.viewportCount = 1; - viewport_state.pViewports = &viewport; - viewport_state.scissorCount = 1; - viewport_state.pScissors = &scissor; - - VkPipelineRasterizationStateCreateInfo rasterizer{}; - rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; - rasterizer.depthClampEnable = VK_FALSE; - rasterizer.rasterizerDiscardEnable = VK_FALSE; - rasterizer.polygonMode = VK_POLYGON_MODE_FILL; - rasterizer.lineWidth = 1.0f; - rasterizer.cullMode = VK_CULL_MODE_NONE; - rasterizer.frontFace = VK_FRONT_FACE_CLOCKWISE; - rasterizer.depthBiasEnable = VK_FALSE; - - VkPipelineMultisampleStateCreateInfo multisampling{}; - multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; - multisampling.sampleShadingEnable = 
VK_FALSE; - multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; - - VkPipelineColorBlendAttachmentState color_blend_attachment{}; - color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; - color_blend_attachment.blendEnable = VK_TRUE; - color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; - color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD; - color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; - color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD; - - VkPipelineColorBlendStateCreateInfo color_blending{}; - color_blending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - color_blending.logicOpEnable = VK_FALSE; - color_blending.logicOp = VK_LOGIC_OP_COPY; - color_blending.attachmentCount = 1; - color_blending.pAttachments = &color_blend_attachment; - color_blending.blendConstants[0] = 1.0f; - color_blending.blendConstants[1] = 1.0f; - color_blending.blendConstants[2] = 1.0f; - color_blending.blendConstants[3] = 1.0f; - - VkDescriptorSetLayout layouts[] = { - renderer.GetVertDescriptorSet().GetLayout(), - renderer.GetFragDescriptorSet().GetLayout() - }; - - VkPipelineLayoutCreateInfo pipeline_layout_info{}; - pipeline_layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - pipeline_layout_info.setLayoutCount = 2; - pipeline_layout_info.pSetLayouts = layouts; - pipeline_layout_info.pushConstantRangeCount = 1; - pipeline_layout_info.pPushConstantRanges = &push_constant; - - if(vkCreatePipelineLayout(RenderCore::Get().GetDevice().Get(), &pipeline_layout_info, nullptr, &m_pipeline_layout) != VK_SUCCESS) - FatalError("Vulkan : failed to create a graphics pipeline layout"); - - VkGraphicsPipelineCreateInfo pipeline_info{}; - pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipeline_info.stageCount = stages.size(); - pipeline_info.pStages = stages.data(); - pipeline_info.pVertexInputState = &vertex_input_state_create_info; - pipeline_info.pInputAssemblyState = &input_assembly; - pipeline_info.pViewportState = &viewport_state; - pipeline_info.pRasterizationState = &rasterizer; - pipeline_info.pMultisampleState = &multisampling; - pipeline_info.pColorBlendState = &color_blending; - pipeline_info.pDynamicState = &dynamic_states; - pipeline_info.layout = m_pipeline_layout; - pipeline_info.renderPass = renderer.GetRenderPass().Get(); - pipeline_info.subpass = 0; - pipeline_info.basePipelineHandle = VK_NULL_HANDLE; - - VkResult res = vkCreateGraphicsPipelines(RenderCore::Get().GetDevice().Get(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &m_graphics_pipeline); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to create a graphics pipeline, %", VerbaliseVkResult(res)); - DebugLog("Vulkan : created new graphic pipeline"); - - vkDestroyShaderModule(RenderCore::Get().GetDevice().Get(), fshader, nullptr); - vkDestroyShaderModule(RenderCore::Get().GetDevice().Get(), vshader, nullptr); - } - - void GraphicPipeline::Destroy() noexcept - { - vkDestroyPipeline(RenderCore::Get().GetDevice().Get(), m_graphics_pipeline, nullptr); - vkDestroyPipelineLayout(RenderCore::Get().GetDevice().Get(), m_pipeline_layout, nullptr); - m_graphics_pipeline = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed a graphics pipeline"); - } -} diff --git 
a/runtime/Sources/Renderer/Pipelines/Shader.cpp b/runtime/Sources/Renderer/Pipelines/Shader.cpp new file mode 100644 index 0000000..63d8873 --- /dev/null +++ b/runtime/Sources/Renderer/Pipelines/Shader.cpp @@ -0,0 +1,83 @@ +#include +#include +#include + +namespace mlx +{ + Shader::Shader(const std::vector<std::uint32_t>& bytecode, ShaderType type, ShaderLayout layout) : m_bytecode(bytecode), m_layout(std::move(layout)) + { + switch(type) + { + case ShaderType::Vertex : m_stage = VK_SHADER_STAGE_VERTEX_BIT; break; + case ShaderType::Fragment : m_stage = VK_SHADER_STAGE_FRAGMENT_BIT; break; + case ShaderType::Compute : m_stage = VK_SHADER_STAGE_COMPUTE_BIT; break; + + default : FatalError("wtf"); break; + } + m_module = kvfCreateShaderModule(RenderCore::Get().GetDevice(), m_bytecode.data(), m_bytecode.size() * 4); + DebugLog("Vulkan : shader module created"); + + GeneratePipelineLayout(m_layout); + } + + void Shader::GeneratePipelineLayout(ShaderLayout layout) + { + for(auto& [n, set] : layout.set_layouts) + { + std::vector<VkDescriptorSetLayoutBinding> bindings(set.binds.size()); + for(std::size_t i = 0; i < set.binds.size(); i++) + { + bindings[i].binding = set.binds[i].first; + bindings[i].descriptorCount = 1; + bindings[i].descriptorType = set.binds[i].second; + bindings[i].pImmutableSamplers = nullptr; + bindings[i].stageFlags = m_stage; + } + m_set_layouts.emplace_back(kvfCreateDescriptorSetLayout(RenderCore::Get().GetDevice(), bindings.data(), bindings.size())); + DebugLog("Vulkan : descriptor set layout created"); + m_pipeline_layout_part.set_layouts.push_back(m_set_layouts.back()); + } + + std::size_t i = 0; + std::vector<VkPushConstantRange> push_constants(layout.push_constants.size()); + m_pipeline_layout_part.push_constants.resize(layout.push_constants.size()); + for(auto& pc : layout.push_constants) + { + VkPushConstantRange push_constant_range = {}; + push_constant_range.offset = pc.offset; + push_constant_range.size = pc.size; + push_constant_range.stageFlags = m_stage; + push_constants[i] = push_constant_range; + m_pipeline_layout_part.push_constants[i] = push_constant_range; + i++; + } + } + + Shader::~Shader() + { + kvfDestroyShaderModule(RenderCore::Get().GetDevice(), m_module); + DebugLog("Vulkan : shader module destroyed"); + for(auto& layout : m_set_layouts) + { + kvfDestroyDescriptorSetLayout(RenderCore::Get().GetDevice(), layout); + DebugLog("Vulkan : descriptor set layout destroyed"); + } + } + + std::shared_ptr<Shader> LoadShaderFromFile(const std::filesystem::path& filepath, ShaderType type, ShaderLayout layout) + { + std::ifstream stream(filepath, std::ios::binary); + if(!stream.is_open()) + FatalError("Renderer : unable to open a spirv shader file, %", filepath); + std::vector<std::uint32_t> data; + stream.seekg(0); + std::uint32_t part = 0; + while(stream.read(reinterpret_cast<char*>(&part), sizeof(part))) + data.push_back(part); + stream.close(); + + std::shared_ptr<Shader> shader = std::make_shared<Shader>(data, type, std::move(layout)); + DebugLog("Vulkan : shader loaded %", filepath); + return shader; + } +}
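ShaderLayout is the declarative side of what GeneratePipelineLayout() above turns into Vulkan descriptor set layouts and push-constant ranges. A sketch of loading a SPIR-V shader through LoadShaderFromFile with one uniform buffer at set 0, binding 0 (the file path and push-constant size are illustrative):

    ShaderLayout layout(
        {
            { 0, ShaderSetLayout({ { 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER } }) }
        },
        { ShaderPushConstantLayout({ 0, 64 }) } // offset 0, 64 bytes (e.g. one 4x4 matrix)
    );
    std::shared_ptr<Shader> shader = LoadShaderFromFile("Shaders/MyVertex.spv", ShaderType::Vertex, std::move(layout));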
diff --git a/runtime/Sources/Renderer/PixelPut.cpp b/runtime/Sources/Renderer/PixelPut.cpp deleted file mode 100644 index f9743a6..0000000 --- a/runtime/Sources/Renderer/PixelPut.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* PixelPut.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 15:14:50 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:46:06 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include - -namespace mlx -{ - void PixelPutPipeline::Init(std::uint32_t width, std::uint32_t height, Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - m_texture.Create(nullptr, width, height, VK_FORMAT_R8G8B8A8_UNORM, "__mlx_pixel_put_pipeline_texture", true); - m_texture.SetDescriptor(renderer.GetFragDescriptorSet().Duplicate()); - - m_buffer.Create(BufferType::HighDynamic, sizeof(std::uint32_t) * (width * height), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, "__mlx_pixel_put_pipeline_texture"); - m_buffer.MapMem(&m_buffer_map); - m_cpu_map = std::vector<std::uint32_t>(height * width + 1, 0); - m_width = width; - m_height = height; - } - - void PixelPutPipeline::SetPixel(int x, int y, std::uint32_t color) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 || y < 0 || x >= static_cast<int>(m_width) || y >= static_cast<int>(m_height)) - return; - m_cpu_map[(y * m_width) + x] = color; - m_has_been_modified = true; - } - - void PixelPutPipeline::Clear() - { - MLX_PROFILE_FUNCTION(); - m_cpu_map.assign(m_width * m_height, 0); - m_has_been_modified = true; - } - - void PixelPutPipeline::Render(Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(m_has_been_modified) - { - std::memcpy(m_buffer_map, m_cpu_map.data(), sizeof(std::uint32_t) * m_cpu_map.size()); - m_texture.CopyFromBuffer(m_buffer); - m_has_been_modified = false; - } - m_texture.UpdateSet(0); - m_texture.Render(renderer, 0, 0); - } - - void PixelPutPipeline::Destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - m_buffer.Destroy(); - m_texture.Destroy(); - } -} diff --git a/runtime/Sources/Renderer/RenderCore.cpp b/runtime/Sources/Renderer/RenderCore.cpp new file mode 100644 index 0000000..6623ae3 --- /dev/null +++ b/runtime/Sources/Renderer/RenderCore.cpp @@ -0,0 +1,78 @@ +#define KVF_IMPLEMENTATION +#ifdef DEBUG + #define KVF_ENABLE_VALIDATION_LAYERS +#endif +#include + +#include +#include +#include +#include +#include + +namespace mlx +{ + static VulkanLoader loader; + + void ErrorCallback(const char* message) noexcept + { + FatalError(message); + std::cout << std::endl; + } + + void ValidationErrorCallback(const char* message) noexcept + { + Error(message); + std::cout << std::endl; + } + + void ValidationWarningCallback(const char* message) noexcept + { + Warning(message); + std::cout << std::endl; + } + + void RenderCore::Init() noexcept + { + kvfSetErrorCallback(&ErrorCallback); + kvfSetValidationErrorCallback(&ValidationErrorCallback); + kvfSetValidationWarningCallback(&ValidationWarningCallback); + + //kvfAddLayer("VK_LAYER_MESA_overlay"); + + Window window(1, 1, "", true); + std::vector<const char*> instance_extensions = window.GetRequiredVulkanInstanceExtentions(); + + m_instance = kvfCreateInstance(instance_extensions.data(), instance_extensions.size()); + DebugLog("Vulkan : instance created"); + + loader.LoadInstance(m_instance); + + VkSurfaceKHR surface = window.CreateVulkanSurface(m_instance); + + m_physical_device = kvfPickGoodDefaultPhysicalDevice(m_instance, surface); + + // just for style + VkPhysicalDeviceProperties props; + vkGetPhysicalDeviceProperties(m_physical_device, &props); + DebugLog("Vulkan : physical device picked '%'", props.deviceName); + + const char* device_extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + VkPhysicalDeviceFeatures features{}; + vkGetPhysicalDeviceFeatures(m_physical_device, &features); + m_device = kvfCreateDevice(m_physical_device, device_extensions, sizeof(device_extensions) / sizeof(device_extensions[0]), &features); + DebugLog("Vulkan : logical device created"); + + vkDestroySurfaceKHR(m_instance, surface, nullptr); + window.Destroy(); + }
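The short-lived 1x1 window above exists only so kvfPickGoodDefaultPhysicalDevice can test present support against a real surface before any user window exists; both the surface and the window are discarded once the device is created. Afterwards every subsystem reaches the handles through the singleton, e.g. (illustrative usage):

    VkDevice device = RenderCore::Get().GetDevice();
    VkPhysicalDevice gpu = RenderCore::Get().GetPhysicalDevice();
    RenderCore::Get().WaitDeviceIdle(); // used before destroying GPU resources, as in the allocator above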
+ DebugLog("Vulkan : logical device created"); + + vkDestroySurfaceKHR(m_instance, surface, nullptr); + window.Destroy(); + } + + void RenderCore::Destroy() noexcept + { + WaitDeviceIdle(); + kvfDestroyDevice(m_device); + DebugLog("Vulkan : logical device destroyed"); + kvfDestroyInstance(m_instance); + DebugLog("Vulkan : instance destroyed"); + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/2DPass.cpp b/runtime/Sources/Renderer/RenderPasses/2DPass.cpp new file mode 100644 index 0000000..28f199e --- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/2DPass.cpp @@ -0,0 +1,112 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + struct SpriteData + { + Vec4f color; + Vec2f position; + }; + + void Render2DPass::Init() + { + ShaderLayout vertex_shader_layout( + { + { 0, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER } + }) + } + }, { ShaderPushConstantLayout({ 0, sizeof(SpriteData) }) } + ); + std::vector vertex_shader_code = { + #include + }; + p_vertex_shader = std::make_shared(vertex_shader_code, ShaderType::Vertex, std::move(vertex_shader_layout)); + ShaderLayout fragment_shader_layout( + { + { 1, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER } + }) + } + }, {} + ); + std::vector fragment_shader_code = { + #include + }; + p_fragment_shader = std::make_shared(fragment_shader, ShaderType::Fragment, std::move(fragment_shader_layout)); + + std::function functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + m_pipeline.Destroy(); + }; + EventBus::RegisterListener({ functor, "__ScopRender2DPass" }); + + p_viewer_data_set = std::make_shared(p_vertex_shader->GetShaderLayout().set_layouts[0].second, p_vertex_shader->GetPipelineLayout().set_layouts[0], ShaderType::Vertex); + p_texture_set = std::make_shared(p_fragment_shader->GetShaderLayout().set_layouts[0].second, p_fragment_shader->GetPipelineLayout().set_layouts[0], ShaderType::Fragment); + + p_viewer_data_buffer = std::make_shared(); + p_viewer_data_buffer->Init(sizeof(ViewerData2D)); + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + p_viewer_data_set->SetUniformBuffer(i, 0, p_viewer_data_buffer->Get(i)); + p_viewer_data_set->Update(i); + } + } + + void Render2DPass::Pass(Scene& scene, Renderer& renderer, Texture& render_target) + { + if(m_pipeline.GetPipeline() == VK_NULL_HANDLE) + { + GraphicPipelineDescriptor pipeline_descriptor; + pipeline_descriptor.vertex_shader = p_vertex_shader; + pipeline_descriptor.fragment_shader = p_fragment_shader; + pipeline_descriptor.color_attachments = { &render_target }; + pipeline_descriptor.clear_color_attachments = false; + m_pipeline.Init(pipeline_descriptor); + } + + std::uint32_t frame_index = renderer.GetCurrentFrameIndex(); + + ViewerData viewer_data; + viewer_data.projection = Mat4f::Ortho(0.0f, render_target.GetWidth(), render_target.GetHeight(), 0.0f); + static CPUBuffer buffer(sizeof(ViewerData2D)); + std::memcpy(buffer.GetData(), &viewer_data, buffer.GetSize()); + p_viewer_data_buffer->SetData(buffer, frame_index); + + VkCommandBuffer cmd = renderer.GetActiveCommandBuffer(); + m_pipeline.BindPipeline(cmd, 0, {}); + for(auto sprite : scene.GetSprites()) + { + SpriteData sprite_data; + sprite_data.position = Vec2f{ static_cast(sprite->GetPosition().x), static_cast(sprite->GetPosition().y) }; + sprite_data.color = sprite->GetColor(); + if(!sprite->IsSetInit()) + sprite->UpdateDescriptorSet(*p_texture_set); + sprite->Bind(frame_index, cmd); + std::array sets = { 
+ + void Render2DPass::Pass(Scene& scene, Renderer& renderer, Texture& render_target) + { + if(m_pipeline.GetPipeline() == VK_NULL_HANDLE) + { + GraphicPipelineDescriptor pipeline_descriptor; + pipeline_descriptor.vertex_shader = p_vertex_shader; + pipeline_descriptor.fragment_shader = p_fragment_shader; + pipeline_descriptor.color_attachments = { &render_target }; + pipeline_descriptor.clear_color_attachments = false; + m_pipeline.Init(pipeline_descriptor); + } + + std::uint32_t frame_index = renderer.GetCurrentFrameIndex(); + + ViewerData2D viewer_data; + viewer_data.projection = Mat4f::Ortho(0.0f, render_target.GetWidth(), render_target.GetHeight(), 0.0f); + static CPUBuffer buffer(sizeof(ViewerData2D)); + std::memcpy(buffer.GetData(), &viewer_data, buffer.GetSize()); + p_viewer_data_buffer->SetData(buffer, frame_index); + + VkCommandBuffer cmd = renderer.GetActiveCommandBuffer(); + m_pipeline.BindPipeline(cmd, 0, {}); + for(auto sprite : scene.GetSprites()) + { + SpriteData sprite_data; + sprite_data.position = Vec2f{ static_cast<float>(sprite->GetPosition().x), static_cast<float>(sprite->GetPosition().y) }; + sprite_data.color = sprite->GetColor(); + if(!sprite->IsSetInit()) + sprite->UpdateDescriptorSet(*p_texture_set); + sprite->Bind(frame_index, cmd); + std::array<VkDescriptorSet, 2> sets = { p_viewer_data_set->GetSet(frame_index), sprite->GetSet(frame_index) }; + vkCmdBindDescriptorSets(cmd, m_pipeline.GetPipelineBindPoint(), m_pipeline.GetPipelineLayout(), 0, sets.size(), sets.data(), 0, nullptr); + vkCmdPushConstants(cmd, m_pipeline.GetPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(SpriteData), &sprite_data); + sprite->GetMesh()->Draw(cmd, renderer.GetDrawCallsCounterRef(), renderer.GetPolygonDrawnCounterRef()); + } + m_pipeline.EndPipeline(cmd); + } + + void Render2DPass::Destroy() + { + m_pipeline.Destroy(); + p_vertex_shader.reset(); + p_fragment_shader.reset(); + p_viewer_data_set.reset(); + p_viewer_data_buffer->Destroy(); + p_texture_set.reset(); + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp b/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp new file mode 100644 index 0000000..d3b5cbe --- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp @@ -0,0 +1,76 @@ +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + void FinalPass::Init() + { + ShaderLayout vertex_shader_layout( + {}, {} + ); + std::vector<std::uint32_t> vertex_shader_code = { + #include <Embedded/ScreenVertex.spv.h> + }; + p_vertex_shader = std::make_shared<Shader>(vertex_shader_code, ShaderType::Vertex, std::move(vertex_shader_layout)); + ShaderLayout fragment_shader_layout( + { + { 0, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER } + }) + } + }, {} + ); + std::vector<std::uint32_t> fragment_shader_code = { + #include <Embedded/ScreenFragment.spv.h> + }; + p_fragment_shader = std::make_shared<Shader>(fragment_shader_code, ShaderType::Fragment, std::move(fragment_shader_layout)); + + std::function<void(const EventBase&)> functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + m_pipeline.Destroy(); + }; + EventBus::RegisterListener({ functor, "__ScopFinalPass" }); + + p_set = std::make_shared<DescriptorSet>(p_fragment_shader->GetShaderLayout().set_layouts[0].second, p_fragment_shader->GetPipelineLayout().set_layouts[0], ShaderType::Fragment); + } + + void FinalPass::Pass(Scene& scene, Renderer& renderer, Texture& render_target) + { + if(m_pipeline.GetPipeline() == VK_NULL_HANDLE) + { + GraphicPipelineDescriptor pipeline_descriptor; + pipeline_descriptor.vertex_shader = p_vertex_shader; + pipeline_descriptor.fragment_shader = p_fragment_shader; + pipeline_descriptor.renderer = &renderer; + pipeline_descriptor.no_vertex_inputs = true; + m_pipeline.Init(pipeline_descriptor); + } + + VkCommandBuffer cmd = renderer.GetActiveCommandBuffer(); + + p_set->SetImage(renderer.GetCurrentFrameIndex(), 0, render_target); + p_set->Update(renderer.GetCurrentFrameIndex(), cmd); + + m_pipeline.BindPipeline(cmd, renderer.GetSwapchainImageIndex(), { 0.0f, 0.0f, 0.0f, 1.0f }); + VkDescriptorSet set = p_set->GetSet(renderer.GetCurrentFrameIndex()); + vkCmdBindDescriptorSets(cmd, m_pipeline.GetPipelineBindPoint(), m_pipeline.GetPipelineLayout(), 0, 1, &set, 0, nullptr); + vkCmdDraw(cmd, 3, 1, 0, 0); + renderer.GetDrawCallsCounterRef()++; + renderer.GetPolygonDrawnCounterRef()++; + m_pipeline.EndPipeline(cmd); + } + + void FinalPass::Destroy() + { + m_pipeline.Destroy(); + p_vertex_shader.reset(); + p_fragment_shader.reset(); + p_set.reset(); + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/Passes.cpp b/runtime/Sources/Renderer/RenderPasses/Passes.cpp new file mode 100644 index 0000000..a2a3e45 --- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/Passes.cpp @@ -0,0 +1,45 @@ +#include +#include +#include +#include + +namespace mlx +{ + void RenderPasses::Init() + { + m_2Dpass.Init(); + m_final.Init(); + }
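RenderPasses strings the two passes together every frame: the 2D pass renders sprites into an offscreen texture, and the final pass samples that texture onto the swapchain image as a fullscreen triangle. A sketch of the per-frame driver, based on BeginFrame/EndFrame in Renderer.cpp below (object ownership is the caller's and the names are illustrative):

    if(renderer.BeginFrame()) // returns false when the swapchain had to be recreated
    {
        passes.Pass(scene, renderer);
        renderer.EndFrame();
    }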
+ + void RenderPasses::Pass(Scene& scene, Renderer& renderer) + { + if(!m_main_render_texture.IsInit()) + { + std::function<void(const EventBase&)> functor = [this, &renderer](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + { + m_main_render_texture.Destroy(); + auto extent = kvfGetSwapchainImagesSize(renderer.GetSwapchain()); + m_main_render_texture.Init({}, extent.width, extent.height); + } + }; + EventBus::RegisterListener({ functor, "__ScopRenderPasses" }); + auto extent = kvfGetSwapchainImagesSize(renderer.GetSwapchain()); + + m_main_render_texture.Init({}, extent.width, extent.height); + } + + m_main_render_texture.Clear(renderer.GetActiveCommandBuffer(), Vec4f{ 0.0f, 0.0f, 0.0f, 1.0f }); + + m_2Dpass.Pass(scene, renderer, m_main_render_texture); + m_final.Pass(scene, renderer, m_main_render_texture); + } + + void RenderPasses::Destroy() + { + m_2Dpass.Destroy(); + m_final.Destroy(); + m_main_render_texture.Destroy(); + } +} diff --git a/runtime/Sources/Renderer/Renderer.cpp b/runtime/Sources/Renderer/Renderer.cpp index 0c131d0..00f09b2 100644 --- a/runtime/Sources/Renderer/Renderer.cpp +++ b/runtime/Sources/Renderer/Renderer.cpp @@ -1,195 +1,139 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Renderer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 17:25:16 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:58:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include - #include -#include -#include +#include +#include +#include +#include namespace mlx { - void Renderer::Init(NonOwningPtr<Texture> render_target) + namespace Internal { - MLX_PROFILE_FUNCTION(); - if(!render_target) + struct ResizeEventBroadcast : public EventBase { - m_surface.Create(*this); - m_swapchain.Init(this); - m_pass.Init(m_swapchain.GetImagesFormat(), VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - for(std::size_t i = 0; i < m_swapchain.GetImagesNumber(); i++) - m_framebuffers.emplace_back().Init(m_pass, m_swapchain.GetImage(i)); - } - else + Event What() const override { return Event::ResizeEventCode; } + }; + + struct FrameBeginEventBroadcast : public EventBase { - m_render_target = render_target; - m_render_target->TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); - m_pass.Init(m_render_target->GetFormat(), m_render_target->GetLayout()); - m_framebuffers.emplace_back().Init(m_pass, *static_cast<Image*>(m_render_target)); - } - m_cmd.Init(); + Event What() const override { return Event::FrameBeginEventCode; } + }; + } + + void Renderer::Init(NonOwningPtr<Window> window) + { + std::function<void(const EventBase&)> functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + this->RequireFramebufferResize(); + }; + EventBus::RegisterListener({ functor, "__ScopRenderer" }); + + p_window = window; + + auto& render_core = RenderCore::Get(); + m_surface = p_window->CreateVulkanSurface(render_core.GetInstance()); + DebugLog("Vulkan : surface created"); + + CreateSwapchain(); for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { - m_render_finished_semaphores[i].Init(); - m_image_available_semaphores[i].Init(); + m_image_available_semaphores[i] = kvfCreateSemaphore(render_core.GetDevice()); + DebugLog("Vulkan : image available semaphore created"); + m_render_finished_semaphores[i] = kvfCreateSemaphore(render_core.GetDevice()); + DebugLog("Vulkan : render finished semaphore created"); + m_cmd_buffers[i] = 
diff --git a/runtime/Sources/Renderer/Renderer.cpp b/runtime/Sources/Renderer/Renderer.cpp index 0c131d0..00f09b2 100644 --- a/runtime/Sources/Renderer/Renderer.cpp +++ b/runtime/Sources/Renderer/Renderer.cpp @@ -1,195 +1,139 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Renderer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 17:25:16 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:58:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include - #include -#include -#include +#include +#include +#include +#include namespace mlx { - void Renderer::Init(NonOwningPtr render_target) + namespace Internal { - MLX_PROFILE_FUNCTION(); - if(!render_target) + struct ResizeEventBroadcast : public EventBase { - m_surface.Create(*this); - m_swapchain.Init(this); - m_pass.Init(m_swapchain.GetImagesFormat(), VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - for(std::size_t i = 0; i < m_swapchain.GetImagesNumber(); i++) - m_framebuffers.emplace_back().Init(m_pass, m_swapchain.GetImage(i)); - } - else + Event What() const override { return Event::ResizeEventCode; } + }; + + struct FrameBeginEventBroadcast : public EventBase { - m_render_target = render_target; - m_render_target->TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); - m_pass.Init(m_render_target->GetFormat(), m_render_target->GetLayout()); - m_framebuffers.emplace_back().Init(m_pass, *static_cast(m_render_target)); - } - m_cmd.Init(); + Event What() const override { return Event::FrameBeginEventCode; } + }; + } + + void Renderer::Init(NonOwningPtr window) + { + std::function functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + this->RequireFramebufferResize(); + }; + EventBus::RegisterListener({ functor, "__ScopRenderer" }); + + p_window = window; + + auto& render_core = RenderCore::Get(); + m_surface = p_window->CreateVulkanSurface(render_core.GetInstance()); + DebugLog("Vulkan : surface created"); + + CreateSwapchain(); for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { - m_render_finished_semaphores[i].Init(); - m_image_available_semaphores[i].Init(); + m_image_available_semaphores[i] = kvfCreateSemaphore(render_core.GetDevice()); + DebugLog("Vulkan : image available semaphore created"); + m_render_finished_semaphores[i] = kvfCreateSemaphore(render_core.GetDevice()); + DebugLog("Vulkan : render finished semaphore created"); + m_cmd_buffers[i] = kvfCreateCommandBuffer(render_core.GetDevice()); + DebugLog("Vulkan : command buffer created"); + m_cmd_fences[i] = kvfCreateFence(render_core.GetDevice()); + DebugLog("Vulkan : fence created"); } - - m_uniform_buffer.reset(new UniformBuffer); - #ifdef DEBUG - m_uniform_buffer->Create(this, sizeof(glm::mat4), "__mlx_matrices_uniform_buffer_"); - #else - m_uniform_buffer->Create(this, sizeof(glm::mat4), nullptr); - #endif - - DescriptorSetLayout vert_layout; - DescriptorSetLayout frag_layout; - - vert_layout.Init({ - {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER} - }, VK_SHADER_STAGE_VERTEX_BIT); - frag_layout.Init({ - {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER} - }, VK_SHADER_STAGE_FRAGMENT_BIT); - - m_vert_set.Init(this, &RenderCore::Get().GetDescriptorPool(), std::move(vert_layout)); - m_frag_set.Init(this, &RenderCore::Get().GetDescriptorPool(), std::move(frag_layout)); - - m_vert_set.WriteDescriptor(0, m_uniform_buffer.Get()); - - m_pipeline.Init(*this); - - m_framebuffer_resized = false; } bool Renderer::BeginFrame() { - MLX_PROFILE_FUNCTION(); - auto device = RenderCore::Get().GetDevice().Get(); - - if(!m_render_target) + kvfWaitForFence(RenderCore::Get().GetDevice(), m_cmd_fences[m_current_frame_index]); + VkResult result = vkAcquireNextImageKHR(RenderCore::Get().GetDevice(), m_swapchain, UINT64_MAX, m_image_available_semaphores[m_current_frame_index], VK_NULL_HANDLE, &m_swapchain_image_index); + if(result == VK_ERROR_OUT_OF_DATE_KHR) { - m_cmd.GetCmdBuffer(m_current_frame_index).WaitForExecution(); - VkResult result = vkAcquireNextImageKHR(device, m_swapchain.Get(), UINT64_MAX, m_image_available_semaphores[m_current_frame_index].Get(), VK_NULL_HANDLE, &m_image_index); - - if(result == VK_ERROR_OUT_OF_DATE_KHR) - { - RecreateRenderData(); - return false; - } - else if(result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) - FatalError("Vulkan error : failed to acquire swapchain image"); - } - else - { - m_image_index = 0; - if(m_render_target->GetLayout() != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) - m_render_target->TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + DestroySwapchain(); + CreateSwapchain(); + EventBus::SendBroadcast(Internal::ResizeEventBroadcast{}); + return false; } + else if(result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) + FatalError("Vulkan error : failed to acquire swapchain image, %", kvfVerbaliseVkResult(result)); - m_cmd.GetCmdBuffer(m_current_frame_index).Reset(); - m_cmd.GetCmdBuffer(m_current_frame_index).BeginRecord(); - auto& fb = m_framebuffers[m_image_index]; - m_pass.Begin(GetActiveCmdBuffer(), fb); - - m_pipeline.BindPipeline(m_cmd.GetCmdBuffer(m_current_frame_index)); - - VkViewport viewport{}; - viewport.x = 0.0f; - viewport.y = 0.0f; - viewport.width = static_cast(fb.GetWidth()); - viewport.height = static_cast(fb.GetHeight()); - viewport.minDepth = 0.0f; - viewport.maxDepth = 1.0f; - vkCmdSetViewport(m_cmd.GetCmdBuffer(m_current_frame_index).Get(), 0, 1, &viewport); - - VkRect2D scissor{}; - scissor.offset = { 0, 0 }; - scissor.extent = { fb.GetWidth(), fb.GetHeight()}; - vkCmdSetScissor(m_cmd.GetCmdBuffer(m_current_frame_index).Get(), 0, 1, &scissor); - + vkResetCommandBuffer(m_cmd_buffers[m_current_frame_index], 0); + kvfBeginCommandBuffer(m_cmd_buffers[m_current_frame_index], 0); + m_drawcalls = 0; + m_polygons_drawn = 0; + EventBus::SendBroadcast(Internal::FrameBeginEventBroadcast{}); return true; } void Renderer::EndFrame() { - MLX_PROFILE_FUNCTION(); - m_pass.End(GetActiveCmdBuffer()); -
m_cmd.GetCmdBuffer(m_current_frame_index).EndRecord(); - - if(!m_render_target) + VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT }; + kvfEndCommandBuffer(m_cmd_buffers[m_current_frame_index]); + kvfSubmitCommandBuffer(RenderCore::Get().GetDevice(), m_cmd_buffers[m_current_frame_index], KVF_GRAPHICS_QUEUE, m_render_finished_semaphores[m_current_frame_index], m_image_available_semaphores[m_current_frame_index], m_cmd_fences[m_current_frame_index], wait_stages); + if(!kvfQueuePresentKHR(RenderCore::Get().GetDevice(), m_render_finished_semaphores[m_current_frame_index], m_swapchain, m_swapchain_image_index) || m_framebuffers_resize) { - m_cmd.GetCmdBuffer(m_current_frame_index).Submit(&m_render_finished_semaphores[m_current_frame_index]); - - VkSwapchainKHR swapchain = m_swapchain.Get(); - VkSemaphore signal_semaphores[] = { m_render_finished_semaphores[m_current_frame_index].Get() }; - - VkPresentInfoKHR present_info{}; - present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; - present_info.waitSemaphoreCount = 1; - present_info.pWaitSemaphores = signal_semaphores; - present_info.swapchainCount = 1; - present_info.pSwapchains = &swapchain; - present_info.pImageIndices = &m_image_index; - VkResult result = vkQueuePresentKHR(RenderCore::Get().GetQueue().GetPresent(), &present_info); - if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || m_framebuffer_resized) - { - m_framebuffer_resized = false; - RecreateRenderData(); - } - else if(result != VK_SUCCESS) - FatalError("Vulkan error : failed to present swap chain image"); - m_current_frame_index = (m_current_frame_index + 1) % MAX_FRAMES_IN_FLIGHT; - } - else - { - m_cmd.GetCmdBuffer(m_current_frame_index).SubmitIdle(true); - m_current_frame_index = 0; + m_framebuffers_resize = false; + DestroySwapchain(); + CreateSwapchain(); + EventBus::SendBroadcast(Internal::ResizeEventBroadcast{}); } + m_current_frame_index = (m_current_frame_index + 1) % MAX_FRAMES_IN_FLIGHT; + kvfResetDeviceDescriptorPools(RenderCore::Get().GetDevice()); } - void Renderer::RecreateRenderData() + void Renderer::CreateSwapchain() { - m_swapchain.Recreate(); - m_pass.Destroy(); - m_pass.Init(m_swapchain.GetImagesFormat(), VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - for(auto& fb : m_framebuffers) - fb.Destroy(); - m_framebuffers.clear(); - for(std::size_t i = 0; i < m_swapchain.GetImagesNumber(); i++) - m_framebuffers.emplace_back().Init(m_pass, m_swapchain.GetImage(i)); + Vec2ui drawable_size = p_window->GetVulkanDrawableSize(); + VkExtent2D extent = { drawable_size.x, drawable_size.y }; + m_swapchain = kvfCreateSwapchainKHR(RenderCore::Get().GetDevice(), RenderCore::Get().GetPhysicalDevice(), m_surface, extent, false); + + std::uint32_t images_count = kvfGetSwapchainImagesCount(m_swapchain); + std::vector tmp(images_count); + m_swapchain_images.resize(images_count); + vkGetSwapchainImagesKHR(RenderCore::Get().GetDevice(), m_swapchain, &images_count, tmp.data()); + for(std::size_t i = 0; i < images_count; i++) + { + m_swapchain_images[i].Init(tmp[i], kvfGetSwapchainImagesFormat(m_swapchain), extent.width, extent.height); + m_swapchain_images[i].TransitionLayout(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); + m_swapchain_images[i].CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); + } + DebugLog("Vulkan : swapchain created"); } - void Renderer::Destroy() + void Renderer::DestroySwapchain() { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(RenderCore::Get().GetDevice().Get()); + RenderCore::Get().WaitDeviceIdle(); + for(Image& 
img : m_swapchain_images) + img.DestroyImageView(); + kvfDestroySwapchainKHR(RenderCore::Get().GetDevice(), m_swapchain); + DebugLog("Vulkan : swapchain destroyed"); + } - m_pipeline.Destroy(); - m_uniform_buffer->Destroy(); - m_vert_layout.Destroy(); - m_frag_layout.Destroy(); - m_frag_set.Destroy(); - m_vert_set.Destroy(); - m_cmd.Destroy(); - m_pass.Destroy(); - if(!m_render_target) + void Renderer::Destroy() noexcept + { + auto& render_core = RenderCore::Get(); + render_core.WaitDeviceIdle(); + + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { - m_swapchain.Destroy(); - m_surface.Destroy(); + kvfDestroySemaphore(render_core.GetDevice(), m_image_available_semaphores[i]); + DebugLog("Vulkan : image available semaphore destroyed"); + kvfDestroySemaphore(render_core.GetDevice(), m_render_finished_semaphores[i]); + DebugLog("Vulkan : render finished semaphore destroyed"); + kvfDestroyFence(render_core.GetDevice(), m_cmd_fences[i]); + DebugLog("Vulkan : fence destroyed"); } - for(auto& fb : m_framebuffers) - fb.Destroy(); - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_semaphores[i].Destroy(); + + DestroySwapchain(); + vkDestroySurfaceKHR(render_core.GetInstance(), m_surface, nullptr); + DebugLog("Vulkan : surface destroyed"); + m_surface = VK_NULL_HANDLE; + } } diff --git a/runtime/Sources/Renderer/Renderpass/Framebuffer.cpp b/runtime/Sources/Renderer/Renderpass/Framebuffer.cpp deleted file mode 100644 index a84b4a3..0000000 --- a/runtime/Sources/Renderer/Renderpass/Framebuffer.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Framebuffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:18:06 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:28:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include - -namespace mlx -{ - void FrameBuffer::Init(RenderPass& renderpass, Image& image) - { - VkImageView attachments[] = { image.GetImageView() }; - - m_width = image.GetWidth(); - m_height = image.GetHeight(); - - VkFramebufferCreateInfo framebuffer_info{}; - framebuffer_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; - framebuffer_info.renderPass = renderpass.Get(); - framebuffer_info.attachmentCount = 1; - framebuffer_info.pAttachments = attachments; - framebuffer_info.width = m_width; - framebuffer_info.height = m_height; - framebuffer_info.layers = 1; - - VkResult res = vkCreateFramebuffer(RenderCore::Get().GetDevice().Get(), &framebuffer_info, nullptr, &m_framebuffer); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to create a framebuffer, %", VerbaliseVkResult(res)); - DebugLog("Vulkan : created new framebuffer"); - } - - void FrameBuffer::Destroy() noexcept - { - vkDestroyFramebuffer(RenderCore::Get().GetDevice().Get(), m_framebuffer, nullptr); - m_framebuffer = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed a framebuffer"); - } -}
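The deleted FrameBuffer wrapper above is one of several swapchain-sized objects (framebuffers, offscreen targets, extent-baked pipelines) that must be rebuilt whenever the swapchain is recreated; the refactor handles this with the ResizeEventBroadcast sent from Renderer. A minimal sketch of that observer pattern, with simplified stand-in types rather than the engine's real EventBus:

#include <functional>
#include <utility>
#include <vector>

enum class Event { Resize };

class EventBus
{
	public:
		static void RegisterListener(std::function<void(Event)> listener)
		{
			GetListeners().push_back(std::move(listener));
		}

		static void SendBroadcast(Event event)
		{
			// Each render pass drops its swapchain-sized resources here and
			// lazily recreates them on the next frame.
			for(auto& listener : GetListeners())
				listener(event);
		}

	private:
		static std::vector<std::function<void(Event)>>& GetListeners()
		{
			static std::vector<std::function<void(Event)>> listeners;
			return listeners;
		}
};

The real bus dispatches EventBase subclasses and keeps named listeners, but the control flow is the same.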
diff --git a/runtime/Sources/Renderer/Renderpass/Renderpass.cpp b/runtime/Sources/Renderer/Renderpass/Renderpass.cpp deleted file mode 100644 index b5d25e9..0000000 --- a/runtime/Sources/Renderer/Renderpass/Renderpass.cpp +++ /dev/null @@ -1,117 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Renderpass.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:21:36 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:31:09 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include "vk_render_pass.h" -#include -#include -#include -#include - -namespace mlx -{ - static const VkClearValue clear_color = {{{ 0.f, 0.f, 0.f, 1.0f }}}; // wtf, this mess to satisfy a warning - - void RenderPass::Init(VkFormat attachment_format, VkImageLayout layout) - { - VkAttachmentDescription color_attachment{}; - color_attachment.format = attachment_format; - color_attachment.samples = VK_SAMPLE_COUNT_1_BIT; - color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; - color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; - color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; - color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - color_attachment.finalLayout = layout; - - VkAttachmentReference color_attachment_ref{}; - color_attachment_ref.attachment = 0; - color_attachment_ref.layout = (layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL : layout); - - VkSubpassDescription subpass1{}; - subpass1.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; - subpass1.colorAttachmentCount = 1; - subpass1.pColorAttachments = &color_attachment_ref; - - VkSubpassDescription subpasses[] = { subpass1 }; - - std::vector subpasses_deps; - subpasses_deps.emplace_back(); - subpasses_deps.back().srcSubpass = VK_SUBPASS_EXTERNAL; - subpasses_deps.back().dstSubpass = 0; - subpasses_deps.back().srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - subpasses_deps.back().dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; - subpasses_deps.back().srcAccessMask = VK_ACCESS_MEMORY_READ_BIT; - subpasses_deps.back().dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - subpasses_deps.back().dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; - - subpasses_deps.emplace_back(); - subpasses_deps.back().srcSubpass = 0; - subpasses_deps.back().dstSubpass = VK_SUBPASS_EXTERNAL; - subpasses_deps.back().srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; - subpasses_deps.back().dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - subpasses_deps.back().srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - subpasses_deps.back().dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; - subpasses_deps.back().dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; - - VkRenderPassCreateInfo render_pass_info{}; - render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; - render_pass_info.attachmentCount = 1; - render_pass_info.pAttachments = &color_attachment; - render_pass_info.subpassCount = sizeof(subpasses) / sizeof(VkSubpassDescription); - render_pass_info.pSubpasses = subpasses; - render_pass_info.dependencyCount = static_cast(subpasses_deps.size()); - render_pass_info.pDependencies = subpasses_deps.data(); - - VkResult res = vkCreateRenderPass(RenderCore::Get().GetDevice().Get(), &render_pass_info, nullptr, &m_render_pass); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to create render pass, %", VerbaliseVkResult(res)); - DebugLog("Vulkan : created new render pass"); - } - - void RenderPass::Begin(class CommandBuffer& cmd, class FrameBuffer& fb) - { - MLX_PROFILE_FUNCTION(); - if(m_is_running) - return; - - VkRenderPassBeginInfo render_pass_info{}; - render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - render_pass_info.renderPass = m_render_pass; - render_pass_info.framebuffer = fb.Get(); - render_pass_info.renderArea.offset = { 0, 0 }; - render_pass_info.renderArea.extent = { fb.GetWidth(), fb.GetHeight() }; - render_pass_info.clearValueCount = 1; - render_pass_info.pClearValues = &clear_color; - - vkCmdBeginRenderPass(cmd.Get(), &render_pass_info, VK_SUBPASS_CONTENTS_INLINE); - - m_is_running = true; - } - - void RenderPass::End(class CommandBuffer& cmd) - { - MLX_PROFILE_FUNCTION(); - if(!m_is_running) - return; - vkCmdEndRenderPass(cmd.Get()); - m_is_running = false; - } - - void RenderPass::Destroy() noexcept - { - vkDestroyRenderPass(RenderCore::Get().GetDevice().Get(), m_render_pass, nullptr); - m_render_pass = VK_NULL_HANDLE; - DebugLog("Vulkan : destroyed a renderpass"); - } -}
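The deleted render pass relied on its external subpass dependencies plus initialLayout/finalLayout to move images between layouts; the new code transitions images explicitly instead (see the TransitionLayout calls in the swapchain code nearby). A sketch of such a transition with a plain pipeline barrier, using deliberately conservative stage and access masks rather than whatever the engine's Image::TransitionLayout actually picks:

#include <vulkan/vulkan.h>

void TransitionColorImage(VkCommandBuffer cmd, VkImage image, VkImageLayout old_layout, VkImageLayout new_layout)
{
	VkImageMemoryBarrier barrier{};
	barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
	barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
	barrier.oldLayout = old_layout; // VK_IMAGE_LAYOUT_UNDEFINED here discards previous contents
	barrier.newLayout = new_layout;
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.image = image;
	barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	barrier.subresourceRange.levelCount = 1;
	barrier.subresourceRange.layerCount = 1;

	// ALL_COMMANDS on both sides is correct but maximally blocking; production
	// code narrows the masks to the stages that actually touch the image.
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}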
diff --git a/runtime/Sources/Renderer/Renderpass/Swapchain.cpp b/runtime/Sources/Renderer/Renderpass/Swapchain.cpp deleted file mode 100644 index ffc5df9..0000000 --- a/runtime/Sources/Renderer/Renderpass/Swapchain.cpp +++ /dev/null @@ -1,150 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Swapchain.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:22:28 by maldavid #+# #+# */ -/* Updated: 2024/04/23 22:43:10 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include - -namespace mlx -{ - void SwapChain::Init(NonOwningPtr renderer) - { - VkDevice device = RenderCore::Get().GetDevice().Get(); - - m_renderer = renderer; - m_swapchain_support = QuerySwapChainSupport(RenderCore::Get().GetDevice().GetPhysicalDevice()); - - VkSurfaceFormatKHR surface_format = renderer->GetSurface().ChooseSwapSurfaceFormat(m_swapchain_support.formats); - VkPresentModeKHR present_mode = ChooseSwapPresentMode(m_swapchain_support.present_modes); - m_extent = ChooseSwapExtent(m_swapchain_support.capabilities); - - std::uint32_t image_count = m_swapchain_support.capabilities.minImageCount + 1; - if(m_swapchain_support.capabilities.maxImageCount > 0 && image_count > m_swapchain_support.capabilities.maxImageCount) - image_count = m_swapchain_support.capabilities.maxImageCount; - - Queues::QueueFamilyIndices indices = RenderCore::Get().GetQueue().FindQueueFamilies(RenderCore::Get().GetDevice().GetPhysicalDevice()); - std::uint32_t queue_family_indices[] = { indices.graphics_family.value(), indices.present_family.value() }; - - VkSwapchainCreateInfoKHR create_info{}; - create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; - create_info.surface = renderer->GetSurface().Get(); - create_info.minImageCount = image_count; - create_info.imageFormat = surface_format.format; - create_info.imageColorSpace = surface_format.colorSpace; - create_info.imageExtent = m_extent; - create_info.imageArrayLayers = 1; - create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; - create_info.preTransform = m_swapchain_support.capabilities.currentTransform; - create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; - create_info.presentMode = present_mode; - create_info.clipped = VK_TRUE; - create_info.oldSwapchain = VK_NULL_HANDLE; - if(indices.graphics_family != indices.present_family) - { - create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; - create_info.queueFamilyIndexCount = 2; - create_info.pQueueFamilyIndices = queue_family_indices; - } - else - create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; - - VkResult res = vkCreateSwapchainKHR(device, &create_info, nullptr, &m_swapchain); - if(res != VK_SUCCESS) - FatalError("Vulkan : failed to create the swapchain, %", VerbaliseVkResult(res)); - - std::vector tmp; - vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, nullptr); - m_images.resize(image_count); - tmp.resize(image_count); - vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, tmp.data()); - - for(std::size_t i = 0; i < image_count; i++) - { - m_images[i].Create(tmp[i], surface_format.format, m_extent.width, m_extent.height); - m_images[i].TransitionLayout(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - m_images[i].CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - } - - m_swapchain_image_format = surface_format.format; - DebugLog("Vulkan : created new swapchain"); - } - - SwapChain::SwapChainSupportDetails SwapChain::QuerySwapChainSupport(VkPhysicalDevice device) - { - SwapChain::SwapChainSupportDetails details; - VkSurfaceKHR surface = m_renderer->GetSurface().Get(); - - if(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface, &details.capabilities) != VK_SUCCESS) - FatalError("Vulkan : unable to retrieve surface capabilities"); - - std::uint32_t format_count = 0; - vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &format_count, nullptr); - - if(format_count != 0) - { - details.formats.resize(format_count); - vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &format_count, details.formats.data()); - } - - std::uint32_t present_mode_count; - vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &present_mode_count, nullptr); - - if(present_mode_count != 0) - { - details.present_modes.resize(present_mode_count); - vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &present_mode_count, details.present_modes.data()); - } - - return details; - } - - VkPresentModeKHR SwapChain::ChooseSwapPresentMode([[maybe_unused]] const std::vector& available_present_modes) - { - // in the future, you may choose to activate vsync or not - return VK_PRESENT_MODE_IMMEDIATE_KHR; - } - - VkExtent2D SwapChain::ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) - { - if(capabilities.currentExtent.width != std::numeric_limits::max()) - return capabilities.currentExtent; - - int width, height; - glfwGetFramebufferSize(m_renderer->GetWindow()->GetNativeWindow(), &width, &height); - - VkExtent2D actual_extent = { static_cast(width), static_cast(height) }; - - actual_extent.width = std::clamp(actual_extent.width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width); - actual_extent.height = std::clamp(actual_extent.height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height); - - return actual_extent; - } - - void SwapChain::Recreate() - { - Destroy(); - Init(m_renderer); - } - - void SwapChain::Destroy() noexcept - { - if(m_swapchain == VK_NULL_HANDLE) - return; - vkDeviceWaitIdle(RenderCore::Get().GetDevice().Get()); - vkDestroySwapchainKHR(RenderCore::Get().GetDevice().Get(), m_swapchain, nullptr); - m_swapchain = VK_NULL_HANDLE; - for(Image& img : m_images) - img.DestroyImageView(); - } -}
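Two judgment calls in the deleted swapchain setup are worth spelling out: the image-count clamp, and the present mode (the old code always returned VK_PRESENT_MODE_IMMEDIATE_KHR, i.e. vsync off with possible tearing). Standalone sketches, not the engine's code; FIFO is the only mode the specification guarantees, so a fallback chain is the safer shape:

#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

std::uint32_t ChooseImageCount(const VkSurfaceCapabilitiesKHR& caps)
{
	std::uint32_t count = caps.minImageCount + 1; // one spare image reduces driver stalls
	if(caps.maxImageCount > 0 && count > caps.maxImageCount)
		count = caps.maxImageCount; // maxImageCount == 0 means "no upper limit"
	return count;
}

VkPresentModeKHR ChoosePresentMode(const std::vector<VkPresentModeKHR>& modes, bool vsync)
{
	if(!vsync)
	{
		for(VkPresentModeKHR mode : modes)
		{
			// Lowest latency options, taken when tearing (IMMEDIATE) or an
			// extra image in flight (MAILBOX) is acceptable.
			if(mode == VK_PRESENT_MODE_MAILBOX_KHR || mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
				return mode;
		}
	}
	return VK_PRESENT_MODE_FIFO_KHR; // always supported
}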
diff --git a/runtime/Sources/Renderer/SceneRenderer.cpp b/runtime/Sources/Renderer/SceneRenderer.cpp new file mode 100644 index 0000000..b6f7860 --- /dev/null +++ b/runtime/Sources/Renderer/SceneRenderer.cpp @@ -0,0 +1,23 @@ +#include +#include +#include +#include +#include + +namespace mlx +{ + void SceneRenderer::Init() + { + m_passes.Init(); + } + + void SceneRenderer::Render(Scene& scene, Renderer& renderer) + { + m_passes.Pass(scene, renderer); + } + + void SceneRenderer::Destroy() + { + m_passes.Destroy(); + } +} diff --git a/runtime/Sources/Renderer/Texts/Font.cpp b/runtime/Sources/Renderer/Texts/Font.cpp deleted file mode 100644 index 6c93646..0000000 --- a/runtime/Sources/Renderer/Texts/Font.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Font.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/11 22:06:09 by kbz_8 #+# #+# */ -/* Updated: 2024/04/23 22:48:30 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -constexpr const int RANGE = 1024; - -namespace mlx -{ - Font::Font(Renderer& renderer, const std::filesystem::path& path, float scale) : m_name(path.string()), m_renderer(renderer), m_scale(scale) - { - m_build_data = path; - } - - Font::Font(class Renderer& renderer, const std::string& name, const std::vector& ttf_data, float scale) : m_name(name), m_renderer(renderer), m_scale(scale) - { - m_build_data = ttf_data; - } - - void Font::BuildFont() - { - MLX_PROFILE_FUNCTION(); - std::vector file_bytes; - if(std::holds_alternative(m_build_data)) - { - std::ifstream file(std::get(m_build_data), std::ios::binary); - if(!file.is_open()) - { - Error("Font load : cannot open font file, %", m_name.c_str()); - return; - } - std::ifstream::pos_type file_size = std::filesystem::file_size(std::get(m_build_data)); - file.seekg(0, std::ios::beg); - file_bytes.resize(file_size); - file.read(reinterpret_cast(file_bytes.data()), file_size); - file.close(); - } - - std::vector tmp_bitmap(RANGE * RANGE); - std::vector vulkan_bitmap(RANGE * RANGE * 4); - stbtt_pack_context pc; - stbtt_PackBegin(&pc, tmp_bitmap.data(), RANGE, RANGE, RANGE, 1, nullptr); - if(std::holds_alternative(m_build_data)) - stbtt_PackFontRange(&pc, file_bytes.data(), 0, m_scale, 32, 96, m_cdata.data()); - else - stbtt_PackFontRange(&pc, std::get>(m_build_data).data(), 0, m_scale, 32, 96, m_cdata.data()); - stbtt_PackEnd(&pc); - for(int i = 0, j = 0; i < RANGE * RANGE; i++, j += 4) - { - vulkan_bitmap[j + 0] = tmp_bitmap[i]; - vulkan_bitmap[j + 1] = tmp_bitmap[i]; - vulkan_bitmap[j + 2] = tmp_bitmap[i]; - vulkan_bitmap[j + 3] = tmp_bitmap[i]; - } - #ifdef DEBUG - m_atlas.Create(vulkan_bitmap.data(), RANGE, RANGE, VK_FORMAT_R8G8B8A8_UNORM, std::string(m_name + "_font_atlas").c_str(), true); - #else - m_atlas.Create(vulkan_bitmap.data(), RANGE, RANGE, VK_FORMAT_R8G8B8A8_UNORM, nullptr, true); - #endif - m_atlas.SetDescriptor(m_renderer.GetFragDescriptorSet().Duplicate()); - m_is_init = true; - } - - void Font::Destroy() - { - MLX_PROFILE_FUNCTION(); - m_atlas.Destroy(); - m_is_init = false; - } - - Font::~Font() - { - if(m_is_init) - Destroy(); - } -}
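Font::BuildFont above expands the single-channel coverage bitmap that stb_truetype produces into RGBA8 by replicating the value into every channel, so white glyphs tint correctly when multiplied by the vertex colour. The same expansion as a standalone helper; note that uploading the atlas as VK_FORMAT_R8_UNORM and swizzling in the image view or shader would avoid the fourfold memory cost, an alternative rather than what the engine does:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> ExpandCoverageToRGBA(const std::vector<std::uint8_t>& coverage)
{
	std::vector<std::uint8_t> rgba(coverage.size() * 4);
	for(std::size_t i = 0; i < coverage.size(); i++)
	{
		rgba[i * 4 + 0] = coverage[i]; // R
		rgba[i * 4 + 1] = coverage[i]; // G
		rgba[i * 4 + 2] = coverage[i]; // B
		rgba[i * 4 + 3] = coverage[i]; // A: coverage doubles as opacity
	}
	return rgba;
}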
diff --git a/runtime/Sources/Renderer/Texts/FontLibrary.cpp b/runtime/Sources/Renderer/Texts/FontLibrary.cpp deleted file mode 100644 index eed3b60..0000000 --- a/runtime/Sources/Renderer/Texts/FontLibrary.cpp +++ /dev/null @@ -1,68 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* FontLibrary.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 09:28:14 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:28:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include - -namespace mlx -{ - std::shared_ptr FontLibrary::GetFontData(FontID id) - { - MLX_PROFILE_FUNCTION(); - if(!m_cache.count(id) || std::find(m_invalid_ids.begin(), m_invalid_ids.end(), id) != m_invalid_ids.end()) - FatalError("Font Library : wrong font ID '%'", id); - return m_cache[id]; - } - - FontID FontLibrary::AddFontToLibrary(std::shared_ptr font) - { - MLX_PROFILE_FUNCTION(); - auto it = std::find_if(m_cache.begin(), m_cache.end(), [&](const std::pair>& v) - { - return v.second->GetScale() == font->GetScale() && - v.second->GetName() == font->GetName() && - std::find(m_invalid_ids.begin(), m_invalid_ids.end(), v.first) == m_invalid_ids.end(); - }); - if(it != m_cache.end()) - return it->first; - font->BuildFont(); - m_cache[m_current_id] = font; - m_current_id++; - return m_current_id - 1; - } - - void FontLibrary::RemoveFontFromLibrary(FontID id) - { - MLX_PROFILE_FUNCTION(); - if(!m_cache.count(id) || std::find(m_invalid_ids.begin(), m_invalid_ids.end(), id) != m_invalid_ids.end()) - { - Warning("Font Library : trying to remove a font with an unknown or invalid ID '%'", id); - return; - } - m_cache[id]->Destroy(); - m_invalid_ids.push_back(id); - } - - void FontLibrary::ClearLibrary() - { - MLX_PROFILE_FUNCTION(); - for(auto& [id, font] : m_cache) - { - font->Destroy(); - m_invalid_ids.push_back(id); - } - // do not `m_cache.clear();` as it releases the fonts and may not destroy the texture atlas that is in use by command buffers - } -}
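FontLibrary above and TextLibrary further down share the same shape: an ID-keyed cache that deduplicates equivalent resources on insertion. A condensed sketch of that pattern with simplified types (the real classes also track invalidated IDs so destroyed fonts keep their slot, as the ClearLibrary comment explains):

#include <cstdint>
#include <map>
#include <memory>
#include <utility>

template<typename Resource>
class ResourceLibrary
{
	public:
		// Requires Resource to be equality-comparable; returns the existing
		// ID when an equivalent resource is already cached.
		std::uint64_t Add(std::shared_ptr<Resource> resource)
		{
			for(const auto& [id, cached] : m_cache)
			{
				if(*cached == *resource)
					return id;
			}
			m_cache[m_next_id] = std::move(resource);
			return m_next_id++;
		}

		std::shared_ptr<Resource> Get(std::uint64_t id) const
		{
			auto it = m_cache.find(id);
			return it != m_cache.end() ? it->second : nullptr;
		}

	private:
		std::map<std::uint64_t, std::shared_ptr<Resource>> m_cache;
		std::uint64_t m_next_id = 0;
};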
diff --git a/runtime/Sources/Renderer/Texts/Text.cpp b/runtime/Sources/Renderer/Texts/Text.cpp deleted file mode 100644 index 162307a..0000000 --- a/runtime/Sources/Renderer/Texts/Text.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* Text.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:11:56 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:33:58 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include - -namespace mlx -{ - void Text::Init(std::string text, FontID font, std::uint32_t color, std::vector vbo_data, std::vector ibo_data) - { - MLX_PROFILE_FUNCTION(); - if(m_is_init) - return; - m_text = std::move(text); - m_color = color; - m_font = font; - #ifdef DEBUG - std::string debug_name = m_text; - for(char& c : debug_name) - { - if(c == ' ' || c == '"' || c == '\'') - c = '_'; - } - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_vbo[i].Create(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data()), debug_name.c_str()); - m_ibo.Create(sizeof(std::uint16_t) * ibo_data.size(), ibo_data.data(), debug_name.c_str()); - #else - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_vbo[i].Create(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data()), nullptr); - m_ibo.Create(sizeof(std::uint16_t) * ibo_data.size(), ibo_data.data(), nullptr); - #endif - m_is_init = true; - } - - void Text::Bind(Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!m_is_init) - return; - m_vbo[renderer.GetActiveImageIndex()].Bind(renderer); - m_ibo.Bind(renderer); - } - - void Text::UpdateVertexData(int frame, std::vector vbo_data) - { - MLX_PROFILE_FUNCTION(); - if(!m_is_init) - return; - m_vbo[frame].SetData(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data())); - } - - void Text::Destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - if(!m_is_init) - return; - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - m_vbo[i].Destroy(); - m_ibo.Destroy(); - m_is_init = false; - } - - Text::~Text() - { - Destroy(); - } -}
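Text above keeps one vertex buffer per frame in flight but a single index buffer: glyph vertices may be rewritten while an older frame is still reading them, whereas the index pattern (two triangles per glyph quad, 0-1-2 / 2-3-0, visible in TextDrawDescriptor::Init below) never changes. Sketch of generating that pattern for any quad count:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::uint16_t> BuildQuadIndices(std::size_t quad_count)
{
	std::vector<std::uint16_t> indices;
	indices.reserve(quad_count * 6);
	for(std::size_t q = 0; q < quad_count; q++)
	{
		const std::uint16_t base = static_cast<std::uint16_t>(q * 4);
		// First triangle of the quad...
		indices.push_back(base + 0);
		indices.push_back(base + 1);
		indices.push_back(base + 2);
		// ...second triangle, reusing two corners.
		indices.push_back(base + 2);
		indices.push_back(base + 3);
		indices.push_back(base + 0);
	}
	return indices;
}

With 16-bit indices this caps a single text at 16384 quads (65536 / 4 vertices), which is ample for on-screen strings.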
diff --git a/runtime/Sources/Renderer/Texts/TextDescriptor.cpp b/runtime/Sources/Renderer/Texts/TextDescriptor.cpp deleted file mode 100644 index b4a3208..0000000 --- a/runtime/Sources/Renderer/Texts/TextDescriptor.cpp +++ /dev/null @@ -1,107 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextDescriptor.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:23:11 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:38:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include -#include - -#define STB_RECT_PACK_IMPLEMENTATION -#include - -#include - -#define STB_TRUETYPE_IMPLEMENTATION -#define STBTT_malloc(x, u) ((void)(u), MemManager::Malloc(x)) -#define STBTT_free(x, u) ((void)(u), MemManager::Free(x)) -#include - -constexpr const int RANGE = 1024; - -namespace mlx -{ - TextDrawDescriptor::TextDrawDescriptor(std::string text, std::uint32_t _color, int _x, int _y) : color(_color), x(_x), y(_y), m_text(std::move(text)) - {} - - void TextDrawDescriptor::Init(FontID font) noexcept - { - MLX_PROFILE_FUNCTION(); - std::vector vertex_data; - std::vector index_data; - - float stb_x = 0.0f; - float stb_y = 0.0f; - - { - std::shared_ptr font_data = FontLibrary::Get().GetFontData(font); - - for(char c : m_text) - { - if(c < 32) - continue; - - stbtt_aligned_quad q; - stbtt_GetPackedQuad(font_data->GetCharData().data(), RANGE, RANGE, c - 32, &stb_x, &stb_y, &q, 1); - - std::size_t index = vertex_data.size(); - - glm::vec4 vertex_color = { - static_cast((color & 0x000000FF)) / 255.f, - static_cast((color & 0x0000FF00) >> 8) / 255.f, - static_cast((color & 0x00FF0000) >> 16) / 255.f, - static_cast((color & 0xFF000000) >> 24) / 255.f - }; - - vertex_data.emplace_back(glm::vec2{q.x0, q.y0}, vertex_color, glm::vec2{q.s0, q.t0}); - vertex_data.emplace_back(glm::vec2{q.x1, q.y0}, vertex_color, glm::vec2{q.s1, q.t0}); - vertex_data.emplace_back(glm::vec2{q.x1, q.y1}, vertex_color, glm::vec2{q.s1, q.t1}); - vertex_data.emplace_back(glm::vec2{q.x0, q.y1}, vertex_color, glm::vec2{q.s0, q.t1}); - - index_data.emplace_back(index + 0); - index_data.emplace_back(index + 1); - index_data.emplace_back(index + 2); - index_data.emplace_back(index + 2); - index_data.emplace_back(index + 3); - index_data.emplace_back(index + 0); - } - } - std::shared_ptr text_data = std::make_shared(); - text_data->Init(m_text, font, color, std::move(vertex_data), std::move(index_data)); - id = TextLibrary::Get().AddTextToLibrary(text_data); - DebugLog("Text put : registered new text to render"); - } - - void TextDrawDescriptor::Render(Renderer& renderer) - { - MLX_PROFILE_FUNCTION(); - std::shared_ptr draw_data = TextLibrary::Get().GetTextData(id); - std::shared_ptr font_data = FontLibrary::Get().GetFontData(draw_data->GetFontInUse()); - TextureAtlas& atlas = const_cast(font_data->GetAtlas()); - draw_data->Bind(renderer); - if(!atlas.GetSet().IsInit()) - atlas.SetDescriptor(renderer.GetFragDescriptorSet().Duplicate()); - if(!atlas.HasBeenUpdated()) - atlas.UpdateSet(0); - atlas.GetSet().Bind(); - atlas.Render(renderer, x, y, draw_data->GetIBOsize()); - } - - void TextDrawDescriptor::ResetUpdate() - { - std::shared_ptr draw_data = TextLibrary::Get().GetTextData(id); - std::shared_ptr font_data = FontLibrary::Get().GetFontData(draw_data->GetFontInUse()); - TextureAtlas& atlas = const_cast(font_data->GetAtlas()); - atlas.ResetUpdate(); - } -} diff --git a/runtime/Sources/Renderer/Texts/TextLibrary.cpp b/runtime/Sources/Renderer/Texts/TextLibrary.cpp deleted file mode 100644 index 5f995d9..0000000 --- a/runtime/Sources/Renderer/Texts/TextLibrary.cpp +++ /dev/null @@ -1,62 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextLibrary.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/10 11:59:57 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:40:28 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include - -namespace mlx -{ - std::shared_ptr TextLibrary::GetTextData(TextID id) - { - MLX_PROFILE_FUNCTION(); - if(!m_cache.count(id)) - FatalError("Text Library : wrong text ID '%'", id); - return m_cache[id]; - } - - TextID TextLibrary::AddTextToLibrary(std::shared_ptr text) - { - MLX_PROFILE_FUNCTION(); - auto it = std::find_if(m_cache.begin(), m_cache.end(), [&](const std::pair>& v) - { - return v.second->GetText() == text->GetText() && v.second->GetColor() == text->GetColor(); - }); - if(it != m_cache.end()) - return it->first; - m_cache[m_current_id] = text; - m_current_id++; - return m_current_id - 1; - } - - void TextLibrary::RemoveTextFromLibrary(TextID id) - { - MLX_PROFILE_FUNCTION(); - if(!m_cache.count(id)) - { - Warning("Text Library : trying to remove a text with an unknown or invalid ID '%'", id); - return; - } - m_cache[id]->Destroy(); - m_cache.erase(id); - } - - void TextLibrary::ClearLibrary() - { - MLX_PROFILE_FUNCTION(); - for(auto& [id, text] : m_cache) - text->Destroy(); - m_cache.clear(); - } -}
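TextDrawDescriptor::Init above unpacks a packed 32-bit colour with the low byte as red and the high byte as alpha (little-endian RGBA bytes, 0xAABBGGRR as an integer). The same computation as a standalone helper, for reference:

#include <cstdint>

struct Color { float r, g, b, a; };

Color UnpackColor(std::uint32_t packed)
{
	return Color{
		static_cast<float>((packed & 0x000000FF) >> 0) / 255.0f, // red
		static_cast<float>((packed & 0x0000FF00) >> 8) / 255.0f, // green
		static_cast<float>((packed & 0x00FF0000) >> 16) / 255.0f, // blue
		static_cast<float>((packed & 0xFF000000) >> 24) / 255.0f // alpha
	};
}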
diff --git a/runtime/Sources/Renderer/Texts/TextManager.cpp b/runtime/Sources/Renderer/Texts/TextManager.cpp deleted file mode 100644 index b660ea2..0000000 --- a/runtime/Sources/Renderer/Texts/TextManager.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* TextManager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/06 16:41:13 by maldavid #+# #+# */ -/* Updated: 2024/04/24 01:42:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#include -#include -#include -#include -#include - -namespace mlx -{ - void TextManager::Init(Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - LoadFont(renderer, "default", 6.f); - } - - void TextManager::LoadFont(Renderer& renderer, const std::filesystem::path& filepath, float scale) - { - MLX_PROFILE_FUNCTION(); - std::shared_ptr font; - if(filepath.string() == "default") - font = std::make_shared(renderer, "default", dogica_ttf, scale); - else - font = std::make_shared(renderer, filepath, scale); - m_font_in_use = FontLibrary::Get().AddFontToLibrary(font); - } - - std::pair TextManager::RegisterText(int x, int y, std::uint32_t color, std::string str) - { - MLX_PROFILE_FUNCTION(); - auto res = m_text_descriptors.emplace(std::move(str), color, x, y); - if(res.second) - { - const_cast(*res.first).Init(m_font_in_use); - return std::make_pair(static_cast(&const_cast(*res.first)), true); - } - - auto text_ptr = TextLibrary::Get().GetTextData(res.first->id); - if(m_font_in_use != text_ptr->GetFontInUse()) - { - // TODO : update the text vertex buffers rather than destroying and recreating them - TextLibrary::Get().RemoveTextFromLibrary(res.first->id); - const_cast(*res.first).Init(m_font_in_use); - } - return std::make_pair(static_cast(&const_cast(*res.first)), false); - } - - void TextManager::Destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - m_text_descriptors.clear(); - } -} diff --git a/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp b/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp new file mode 100644 index 0000000..df743d1 --- /dev/null +++ b/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp @@ -0,0 +1,416 @@ +#include +#include + +#ifdef _WIN32 + __declspec(dllimport) HMODULE __stdcall LoadLibraryA(LPCSTR); + __declspec(dllimport) FARPROC __stdcall GetProcAddress(HMODULE, LPCSTR); + __declspec(dllimport) int __stdcall FreeLibrary(HMODULE); +#endif + +#if defined(MLX_COMPILER_GCC) + #define DISABLE_GCC_PEDANTIC_WARNINGS \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wpedantic\"") + #define RESTORE_GCC_PEDANTIC_WARNINGS \ + _Pragma("GCC diagnostic pop") +#else + #define DISABLE_GCC_PEDANTIC_WARNINGS + #define RESTORE_GCC_PEDANTIC_WARNINGS +#endif + +namespace mlx +{ + namespace Internal + { + static PFN_vkVoidFunction vkGetInstanceProcAddrStub(Handle context, const char* name) + { + return vkGetInstanceProcAddr((VkInstance)context, name); + } + } + + VulkanLoader::VulkanLoader() + { + #if defined(_WIN32) + p_module = LoadLibraryA("vulkan-1.dll"); + if(!p_module) + FatalError("Vulkan loader : failed to load libvulkan"); + vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)(void(*)(void))GetProcAddress(p_module, "vkGetInstanceProcAddr"); + #elif defined(__APPLE__) + p_module = dlopen("libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + p_module = dlopen("libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + p_module = dlopen("libMoltenVK.dylib", RTLD_NOW | RTLD_LOCAL); + + // Add support for using Vulkan and MoltenVK in a Framework. App store rules for iOS + // strictly enforce no .dylib's.
If they aren't found it just falls through + if(!p_module) + p_module = dlopen("vulkan.framework/vulkan", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + p_module = dlopen("MoltenVK.framework/MoltenVK", RTLD_NOW | RTLD_LOCAL); + + // modern versions of macOS don't search /usr/local/lib automatically contrary to what man dlopen says + // Vulkan SDK uses this as the system-wide installation location, so we're going to fallback to this if all else fails + if(!p_module && getenv("DYLD_FALLBACK_LIBRARY_PATH") == NULL) + p_module = dlopen("/usr/local/lib/libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + FatalError("Vulkan loader : failed to load libvulkan"); + + vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(p_module, "vkGetInstanceProcAddr"); + #else + p_module = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + p_module = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL); + if(!p_module) + FatalError("Vulkan loader : failed to load libvulkan"); + DISABLE_GCC_PEDANTIC_WARNINGS + vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(p_module, "vkGetInstanceProcAddr"); + RESTORE_GCC_PEDANTIC_WARNINGS + #endif + DebugLog("Vulkan loader : libvulkan loaded"); + LoadGlobalFunctions(nullptr, Internal::vkGetInstanceProcAddrStub); + } + + void VulkanLoader::LoadInstance(VkInstance instance) + { + LoadInstanceFunctions(instance, Internal::vkGetInstanceProcAddrStub); + LoadDeviceFunctions(instance, Internal::vkGetInstanceProcAddrStub); + } + + void VulkanLoader::LoadGlobalFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept + { + #if defined(VK_VERSION_1_0) + vkCreateInstance = (PFN_vkCreateInstance)load(context, "vkCreateInstance"); + vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)load(context, "vkEnumerateInstanceExtensionProperties"); + vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)load(context, "vkEnumerateInstanceLayerProperties"); + #endif /* defined(VK_VERSION_1_0) */ + DebugLog("Vulkan loader : global functions loaded"); + } + + void VulkanLoader::LoadInstanceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept + { + #if defined(VK_VERSION_1_0) + vkCreateDevice = (PFN_vkCreateDevice)load(context, "vkCreateDevice"); + vkDestroyInstance = (PFN_vkDestroyInstance)load(context, "vkDestroyInstance"); + vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties)load(context, "vkEnumerateDeviceExtensionProperties"); + vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties)load(context, "vkEnumerateDeviceLayerProperties"); + vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices)load(context, "vkEnumeratePhysicalDevices"); + vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)load(context, "vkGetDeviceProcAddr"); + vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures)load(context, "vkGetPhysicalDeviceFeatures"); + vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties)load(context, "vkGetPhysicalDeviceFormatProperties"); + vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties)load(context, "vkGetPhysicalDeviceImageFormatProperties"); + vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)load(context, "vkGetPhysicalDeviceMemoryProperties"); + vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)load(context, "vkGetPhysicalDeviceProperties"); + vkGetPhysicalDeviceQueueFamilyProperties = 
(PFN_vkGetPhysicalDeviceQueueFamilyProperties)load(context, "vkGetPhysicalDeviceQueueFamilyProperties"); + vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties"); + #endif /* defined(VK_VERSION_1_0) */ + #if defined(VK_KHR_surface) + vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)load(context, "vkDestroySurfaceKHR"); + vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)load(context, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); + vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)load(context, "vkGetPhysicalDeviceSurfaceFormatsKHR"); + vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)load(context, "vkGetPhysicalDeviceSurfacePresentModesKHR"); + vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)load(context, "vkGetPhysicalDeviceSurfaceSupportKHR"); + #endif /* defined(VK_KHR_surface) */ + DebugLog("Vulkan loader : instance functions loaded"); + } + + void VulkanLoader::LoadDeviceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept + { + #if defined(VK_VERSION_1_0) + vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)load(context, "vkAllocateCommandBuffers"); + vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)load(context, "vkAllocateDescriptorSets"); + vkAllocateMemory = (PFN_vkAllocateMemory)load(context, "vkAllocateMemory"); + vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)load(context, "vkBeginCommandBuffer"); + vkBindBufferMemory = (PFN_vkBindBufferMemory)load(context, "vkBindBufferMemory"); + vkBindImageMemory = (PFN_vkBindImageMemory)load(context, "vkBindImageMemory"); + vkCmdBeginQuery = (PFN_vkCmdBeginQuery)load(context, "vkCmdBeginQuery"); + vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)load(context, "vkCmdBeginRenderPass"); + vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)load(context, "vkCmdBindDescriptorSets"); + vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)load(context, "vkCmdBindIndexBuffer"); + vkCmdBindPipeline = (PFN_vkCmdBindPipeline)load(context, "vkCmdBindPipeline"); + vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)load(context, "vkCmdBindVertexBuffers"); + vkCmdBlitImage = (PFN_vkCmdBlitImage)load(context, "vkCmdBlitImage"); + vkCmdClearAttachments = (PFN_vkCmdClearAttachments)load(context, "vkCmdClearAttachments"); + vkCmdClearColorImage = (PFN_vkCmdClearColorImage)load(context, "vkCmdClearColorImage"); + vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)load(context, "vkCmdClearDepthStencilImage"); + vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)load(context, "vkCmdCopyBuffer"); + vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)load(context, "vkCmdCopyBufferToImage"); + vkCmdCopyImage = (PFN_vkCmdCopyImage)load(context, "vkCmdCopyImage"); + vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)load(context, "vkCmdCopyImageToBuffer"); + vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)load(context, "vkCmdCopyQueryPoolResults"); + vkCmdDispatch = (PFN_vkCmdDispatch)load(context, "vkCmdDispatch"); + vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)load(context, "vkCmdDispatchIndirect"); + vkCmdDraw = (PFN_vkCmdDraw)load(context, "vkCmdDraw"); + vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)load(context, "vkCmdDrawIndexed"); + vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)load(context, "vkCmdDrawIndexedIndirect"); + 
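// Note: every device-level entry point below is resolved through
+ // vkGetInstanceProcAddr (via the stub above). That is always valid, but the
+ // returned pointers go through the loader's per-device dispatch; resolving
+ // them with vkGetDeviceProcAddr against the created VkDevice would skip that
+ // indirection. Mentioned as a possible refinement, not current behaviour.
+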
vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)load(context, "vkCmdDrawIndirect"); + vkCmdEndQuery = (PFN_vkCmdEndQuery)load(context, "vkCmdEndQuery"); + vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)load(context, "vkCmdEndRenderPass"); + vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)load(context, "vkCmdExecuteCommands"); + vkCmdFillBuffer = (PFN_vkCmdFillBuffer)load(context, "vkCmdFillBuffer"); + vkCmdNextSubpass = (PFN_vkCmdNextSubpass)load(context, "vkCmdNextSubpass"); + vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)load(context, "vkCmdPipelineBarrier"); + vkCmdPushConstants = (PFN_vkCmdPushConstants)load(context, "vkCmdPushConstants"); + vkCmdResetEvent = (PFN_vkCmdResetEvent)load(context, "vkCmdResetEvent"); + vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)load(context, "vkCmdResetQueryPool"); + vkCmdResolveImage = (PFN_vkCmdResolveImage)load(context, "vkCmdResolveImage"); + vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)load(context, "vkCmdSetBlendConstants"); + vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)load(context, "vkCmdSetDepthBias"); + vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)load(context, "vkCmdSetDepthBounds"); + vkCmdSetEvent = (PFN_vkCmdSetEvent)load(context, "vkCmdSetEvent"); + vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)load(context, "vkCmdSetLineWidth"); + vkCmdSetScissor = (PFN_vkCmdSetScissor)load(context, "vkCmdSetScissor"); + vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)load(context, "vkCmdSetStencilCompareMask"); + vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)load(context, "vkCmdSetStencilReference"); + vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)load(context, "vkCmdSetStencilWriteMask"); + vkCmdSetViewport = (PFN_vkCmdSetViewport)load(context, "vkCmdSetViewport"); + vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)load(context, "vkCmdUpdateBuffer"); + vkCmdWaitEvents = (PFN_vkCmdWaitEvents)load(context, "vkCmdWaitEvents"); + vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)load(context, "vkCmdWriteTimestamp"); + vkCreateBuffer = (PFN_vkCreateBuffer)load(context, "vkCreateBuffer"); + vkCreateBufferView = (PFN_vkCreateBufferView)load(context, "vkCreateBufferView"); + vkCreateCommandPool = (PFN_vkCreateCommandPool)load(context, "vkCreateCommandPool"); + vkCreateComputePipelines = (PFN_vkCreateComputePipelines)load(context, "vkCreateComputePipelines"); + vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)load(context, "vkCreateDescriptorPool"); + vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)load(context, "vkCreateDescriptorSetLayout"); + vkCreateEvent = (PFN_vkCreateEvent)load(context, "vkCreateEvent"); + vkCreateFence = (PFN_vkCreateFence)load(context, "vkCreateFence"); + vkCreateFramebuffer = (PFN_vkCreateFramebuffer)load(context, "vkCreateFramebuffer"); + vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)load(context, "vkCreateGraphicsPipelines"); + vkCreateImage = (PFN_vkCreateImage)load(context, "vkCreateImage"); + vkCreateImageView = (PFN_vkCreateImageView)load(context, "vkCreateImageView"); + vkCreatePipelineCache = (PFN_vkCreatePipelineCache)load(context, "vkCreatePipelineCache"); + vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)load(context, "vkCreatePipelineLayout"); + vkCreateQueryPool = (PFN_vkCreateQueryPool)load(context, "vkCreateQueryPool"); + vkCreateRenderPass = (PFN_vkCreateRenderPass)load(context, "vkCreateRenderPass"); + vkCreateSampler = (PFN_vkCreateSampler)load(context, "vkCreateSampler"); + vkCreateSemaphore = (PFN_vkCreateSemaphore)load(context, 
"vkCreateSemaphore"); + vkCreateShaderModule = (PFN_vkCreateShaderModule)load(context, "vkCreateShaderModule"); + vkDestroyBuffer = (PFN_vkDestroyBuffer)load(context, "vkDestroyBuffer"); + vkDestroyBufferView = (PFN_vkDestroyBufferView)load(context, "vkDestroyBufferView"); + vkDestroyCommandPool = (PFN_vkDestroyCommandPool)load(context, "vkDestroyCommandPool"); + vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)load(context, "vkDestroyDescriptorPool"); + vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)load(context, "vkDestroyDescriptorSetLayout"); + vkDestroyDevice = (PFN_vkDestroyDevice)load(context, "vkDestroyDevice"); + vkDestroyEvent = (PFN_vkDestroyEvent)load(context, "vkDestroyEvent"); + vkDestroyFence = (PFN_vkDestroyFence)load(context, "vkDestroyFence"); + vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)load(context, "vkDestroyFramebuffer"); + vkDestroyImage = (PFN_vkDestroyImage)load(context, "vkDestroyImage"); + vkDestroyImageView = (PFN_vkDestroyImageView)load(context, "vkDestroyImageView"); + vkDestroyPipeline = (PFN_vkDestroyPipeline)load(context, "vkDestroyPipeline"); + vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)load(context, "vkDestroyPipelineCache"); + vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)load(context, "vkDestroyPipelineLayout"); + vkDestroyQueryPool = (PFN_vkDestroyQueryPool)load(context, "vkDestroyQueryPool"); + vkDestroyRenderPass = (PFN_vkDestroyRenderPass)load(context, "vkDestroyRenderPass"); + vkDestroySampler = (PFN_vkDestroySampler)load(context, "vkDestroySampler"); + vkDestroySemaphore = (PFN_vkDestroySemaphore)load(context, "vkDestroySemaphore"); + vkDestroyShaderModule = (PFN_vkDestroyShaderModule)load(context, "vkDestroyShaderModule"); + vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)load(context, "vkDeviceWaitIdle"); + vkEndCommandBuffer = (PFN_vkEndCommandBuffer)load(context, "vkEndCommandBuffer"); + vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)load(context, "vkFlushMappedMemoryRanges"); + vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)load(context, "vkFreeCommandBuffers"); + vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)load(context, "vkFreeDescriptorSets"); + vkFreeMemory = (PFN_vkFreeMemory)load(context, "vkFreeMemory"); + vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)load(context, "vkGetBufferMemoryRequirements"); + vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)load(context, "vkGetDeviceMemoryCommitment"); + vkGetDeviceQueue = (PFN_vkGetDeviceQueue)load(context, "vkGetDeviceQueue"); + vkGetEventStatus = (PFN_vkGetEventStatus)load(context, "vkGetEventStatus"); + vkGetFenceStatus = (PFN_vkGetFenceStatus)load(context, "vkGetFenceStatus"); + vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)load(context, "vkGetImageMemoryRequirements"); + vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)load(context, "vkGetImageSparseMemoryRequirements"); + vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)load(context, "vkGetImageSubresourceLayout"); + vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)load(context, "vkGetPipelineCacheData"); + vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults)load(context, "vkGetQueryPoolResults"); + vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)load(context, "vkGetRenderAreaGranularity"); + vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)load(context, "vkInvalidateMappedMemoryRanges"); + vkMapMemory = (PFN_vkMapMemory)load(context, 
"vkMapMemory"); + vkMergePipelineCaches = (PFN_vkMergePipelineCaches)load(context, "vkMergePipelineCaches"); + vkQueueBindSparse = (PFN_vkQueueBindSparse)load(context, "vkQueueBindSparse"); + vkQueueSubmit = (PFN_vkQueueSubmit)load(context, "vkQueueSubmit"); + vkQueueWaitIdle = (PFN_vkQueueWaitIdle)load(context, "vkQueueWaitIdle"); + vkResetCommandBuffer = (PFN_vkResetCommandBuffer)load(context, "vkResetCommandBuffer"); + vkResetCommandPool = (PFN_vkResetCommandPool)load(context, "vkResetCommandPool"); + vkResetDescriptorPool = (PFN_vkResetDescriptorPool)load(context, "vkResetDescriptorPool"); + vkResetEvent = (PFN_vkResetEvent)load(context, "vkResetEvent"); + vkResetFences = (PFN_vkResetFences)load(context, "vkResetFences"); + vkSetEvent = (PFN_vkSetEvent)load(context, "vkSetEvent"); + vkUnmapMemory = (PFN_vkUnmapMemory)load(context, "vkUnmapMemory"); + vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)load(context, "vkUpdateDescriptorSets"); + vkWaitForFences = (PFN_vkWaitForFences)load(context, "vkWaitForFences"); + #endif /* defined(VK_VERSION_1_0) */ + #if defined(VK_KHR_swapchain) + vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)load(context, "vkAcquireNextImageKHR"); + vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)load(context, "vkCreateSwapchainKHR"); + vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)load(context, "vkDestroySwapchainKHR"); + vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)load(context, "vkGetSwapchainImagesKHR"); + vkQueuePresentKHR = (PFN_vkQueuePresentKHR)load(context, "vkQueuePresentKHR"); + #endif /* defined(VK_KHR_swapchain) */ + + DebugLog("Vulkan loader : device functions loaded"); + } + + VulkanLoader::~VulkanLoader() + { + #if defined(_WIN32) + FreeLibrary((HMODULE)p_module); + #else + dlclose(p_module); + #endif + p_module = nullptr; + DebugLog("Vulkan loader : libvulkan unloaded"); + } +} + +#if defined(VK_VERSION_1_0) + PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; + PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; + PFN_vkAllocateMemory vkAllocateMemory; + PFN_vkBeginCommandBuffer vkBeginCommandBuffer; + PFN_vkBindBufferMemory vkBindBufferMemory; + PFN_vkBindImageMemory vkBindImageMemory; + PFN_vkCmdBeginQuery vkCmdBeginQuery; + PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; + PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; + PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; + PFN_vkCmdBindPipeline vkCmdBindPipeline; + PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; + PFN_vkCmdBlitImage vkCmdBlitImage; + PFN_vkCmdClearAttachments vkCmdClearAttachments; + PFN_vkCmdClearColorImage vkCmdClearColorImage; + PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; + PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; + PFN_vkCmdCopyImage vkCmdCopyImage; + PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; + PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; + PFN_vkCmdDispatch vkCmdDispatch; + PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; + PFN_vkCmdDraw vkCmdDraw; + PFN_vkCmdDrawIndexed vkCmdDrawIndexed; + PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; + PFN_vkCmdDrawIndirect vkCmdDrawIndirect; + PFN_vkCmdEndQuery vkCmdEndQuery; + PFN_vkCmdEndRenderPass vkCmdEndRenderPass; + PFN_vkCmdExecuteCommands vkCmdExecuteCommands; + PFN_vkCmdFillBuffer vkCmdFillBuffer; + PFN_vkCmdNextSubpass vkCmdNextSubpass; + PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; + PFN_vkCmdPushConstants vkCmdPushConstants; + PFN_vkCmdResetEvent vkCmdResetEvent; + PFN_vkCmdResetQueryPool 
vkCmdResetQueryPool; + PFN_vkCmdResolveImage vkCmdResolveImage; + PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; + PFN_vkCmdSetDepthBias vkCmdSetDepthBias; + PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; + PFN_vkCmdSetEvent vkCmdSetEvent; + PFN_vkCmdSetLineWidth vkCmdSetLineWidth; + PFN_vkCmdSetScissor vkCmdSetScissor; + PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; + PFN_vkCmdSetStencilReference vkCmdSetStencilReference; + PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; + PFN_vkCmdSetViewport vkCmdSetViewport; + PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; + PFN_vkCmdWaitEvents vkCmdWaitEvents; + PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; + PFN_vkCreateBuffer vkCreateBuffer; + PFN_vkCreateBufferView vkCreateBufferView; + PFN_vkCreateCommandPool vkCreateCommandPool; + PFN_vkCreateComputePipelines vkCreateComputePipelines; + PFN_vkCreateDescriptorPool vkCreateDescriptorPool; + PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; + PFN_vkCreateDevice vkCreateDevice; + PFN_vkCreateEvent vkCreateEvent; + PFN_vkCreateFence vkCreateFence; + PFN_vkCreateFramebuffer vkCreateFramebuffer; + PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; + PFN_vkCreateImage vkCreateImage; + PFN_vkCreateImageView vkCreateImageView; + PFN_vkCreateInstance vkCreateInstance; + PFN_vkCreatePipelineCache vkCreatePipelineCache; + PFN_vkCreatePipelineLayout vkCreatePipelineLayout; + PFN_vkCreateQueryPool vkCreateQueryPool; + PFN_vkCreateRenderPass vkCreateRenderPass; + PFN_vkCreateSampler vkCreateSampler; + PFN_vkCreateSemaphore vkCreateSemaphore; + PFN_vkCreateShaderModule vkCreateShaderModule; + PFN_vkDestroyBuffer vkDestroyBuffer; + PFN_vkDestroyBufferView vkDestroyBufferView; + PFN_vkDestroyCommandPool vkDestroyCommandPool; + PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; + PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; + PFN_vkDestroyDevice vkDestroyDevice; + PFN_vkDestroyEvent vkDestroyEvent; + PFN_vkDestroyFence vkDestroyFence; + PFN_vkDestroyFramebuffer vkDestroyFramebuffer; + PFN_vkDestroyImage vkDestroyImage; + PFN_vkDestroyImageView vkDestroyImageView; + PFN_vkDestroyInstance vkDestroyInstance; + PFN_vkDestroyPipeline vkDestroyPipeline; + PFN_vkDestroyPipelineCache vkDestroyPipelineCache; + PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; + PFN_vkDestroyQueryPool vkDestroyQueryPool; + PFN_vkDestroyRenderPass vkDestroyRenderPass; + PFN_vkDestroySampler vkDestroySampler; + PFN_vkDestroySemaphore vkDestroySemaphore; + PFN_vkDestroyShaderModule vkDestroyShaderModule; + PFN_vkDeviceWaitIdle vkDeviceWaitIdle; + PFN_vkEndCommandBuffer vkEndCommandBuffer; + PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; + PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; + PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; + PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; + PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + PFN_vkFreeCommandBuffers vkFreeCommandBuffers; + PFN_vkFreeDescriptorSets vkFreeDescriptorSets; + PFN_vkFreeMemory vkFreeMemory; + PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + PFN_vkGetDeviceQueue vkGetDeviceQueue; + PFN_vkGetEventStatus vkGetEventStatus; + PFN_vkGetFenceStatus vkGetFenceStatus; + PFN_vkGetImageMemoryRequirements 
vkGetImageMemoryRequirements; + PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; + PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; + PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; + PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; + PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; + PFN_vkGetPipelineCacheData vkGetPipelineCacheData; + PFN_vkGetQueryPoolResults vkGetQueryPoolResults; + PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; + PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + PFN_vkMapMemory vkMapMemory; + PFN_vkMergePipelineCaches vkMergePipelineCaches; + PFN_vkQueueBindSparse vkQueueBindSparse; + PFN_vkQueueSubmit vkQueueSubmit; + PFN_vkQueueWaitIdle vkQueueWaitIdle; + PFN_vkResetCommandBuffer vkResetCommandBuffer; + PFN_vkResetCommandPool vkResetCommandPool; + PFN_vkResetDescriptorPool vkResetDescriptorPool; + PFN_vkResetEvent vkResetEvent; + PFN_vkResetFences vkResetFences; + PFN_vkSetEvent vkSetEvent; + PFN_vkUnmapMemory vkUnmapMemory; + PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; + PFN_vkWaitForFences vkWaitForFences; +#endif /* defined(VK_VERSION_1_0) */ +#if defined(VK_KHR_swapchain) + PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; + PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; + PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; + PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; + PFN_vkQueuePresentKHR vkQueuePresentKHR; +#endif /* defined(VK_KHR_swapchain) */ +#if defined(VK_KHR_surface) + PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; +#endif /* defined(VK_KHR_surface) */ diff --git a/runtime/Sources/Renderer/Vulkan/VulkanLoader.h b/runtime/Sources/Renderer/Vulkan/VulkanLoader.h new file mode 100644 index 0000000..c6a7274 --- /dev/null +++ b/runtime/Sources/Renderer/Vulkan/VulkanLoader.h @@ -0,0 +1,42 @@ +#ifndef __MLX_VULKAN_LOADER__ +#define __MLX_VULKAN_LOADER__ + +#ifdef _WIN32 + typedef const char* LPCSTR; + typedef struct HINSTANCE__* HINSTANCE; + typedef HINSTANCE HMODULE; + #if defined(_MINWINDEF_) + /* minwindef.h defines FARPROC, and attempting to redefine it may conflict with -Wstrict-prototypes */ + #elif defined(_WIN64) + typedef __int64 (__stdcall* FARPROC)(void); + #else + typedef int (__stdcall* FARPROC)(void); + #endif +#else + #include <dlfcn.h> +#endif + +namespace mlx +{ + class VulkanLoader + { + public: + VulkanLoader(); + void LoadInstance(VkInstance instance); + ~VulkanLoader(); + + private: + void LoadGlobalFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept; + void LoadInstanceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept; + void LoadDeviceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*))
noexcept; + + private: + #ifdef _WIN32 + HMODULE p_module = nullptr; + #else + Handle p_module = nullptr; + #endif + }; +} + +#endif diff --git a/scripts/fetch_dependencies.sh b/scripts/fetch_dependencies.sh index d40d1b2..ba8a0aa 100755 --- a/scripts/fetch_dependencies.sh +++ b/scripts/fetch_dependencies.sh @@ -1,16 +1,5 @@ #!/bin/bash -# Update volk -rm -f ../third_party/volk.c -rm -f ../third_party/volk.h -tag_name=$(curl -sL https://api.github.com/repos/zeux/Volk/releases/latest | jq -r '.tag_name') -wget https://api.github.com/repos/zeux/volk/zipball/$tag_name -O volk.zip -unzip -o volk.zip -d ../third_party/ -mv ../third_party/zeux-volk*/volk.h ../third_party -mv ../third_party/zeux-volk*/volk.c ../third_party -rm -rf ../third_party/zeux-volk* -rm volk.zip - # Update VMA rm -f ../third_party/vma.h tag_name=$(curl -sL https://api.github.com/repos/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/releases/latest | jq -r '.tag_name') @@ -29,3 +18,9 @@ mv ../third_party/Vulkan-Headers-main/include/vulkan ../third_party/ mv ../third_party/Vulkan-Headers-main/include/vk_video ../third_party/ rm -rf ../third_party/Vulkan-Headers-main rm vulkan-headers.zip + +# Update KVF +rm -f ../third_party/kvf.h +git clone https://github.com/Kbz-8/KVF.git ../third_party/KVF/ +mv ../third_party/KVF/kvf.h ../third_party/kvf.h +rm -rf ../third_party/KVF diff --git a/third_party/glm/common.hpp b/third_party/glm/common.hpp deleted file mode 100755 index 0328dc9..0000000 --- a/third_party/glm/common.hpp +++ /dev/null @@ -1,539 +0,0 @@ -/// @ref core -/// @file glm/common.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.3 Common Functions -/// -/// @defgroup core_func_common Common functions -/// @ingroup core -/// -/// Provides GLSL common functions -/// -/// These all operate component-wise. The description is per component. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/qualifier.hpp" -#include "detail/_fixes.hpp" - -namespace glm -{ - /// @addtogroup core_func_common - /// @{ - - /// Returns x if x >= 0; otherwise, it returns -x. - /// - /// @tparam genType floating-point or signed integer; scalar or vector types. - /// - /// @see GLSL abs man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x); - - /// Returns x if x >= 0; otherwise, it returns -x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL abs man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec abs(vec const& x); - - /// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sign man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec sign(vec const& x); - - /// Returns a value equal to the nearest integer that is less then or equal to x. 
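
The floor() contract just above ("less than or equal to x") means rounding toward negative infinity; for negative inputs that is not the same as dropping the fractional part, which is trunc(), documented next. A minimal standalone sketch in plain C++ (only <cmath> and <cassert>, independent of the GLM templates in this hunk):

    #include <cassert>
    #include <cmath>

    int main()
    {
        assert(std::floor(1.5) == 1.0);
        assert(std::floor(-1.5) == -2.0); // toward -infinity: moves away from zero for negative x
        assert(std::trunc(-1.5) == -1.0); // toward zero: just drops the fraction
        return 0;
    }
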
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floor man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floor(vec const& x); - - /// Returns a value equal to the nearest integer to x - /// whose absolute value is not larger than the absolute value of x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL trunc man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec trunc(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// The fraction 0.5 will round in a direction chosen by the - /// implementation, presumably the direction that is fastest. - /// This includes the possibility that round(x) returns the - /// same value as roundEven(x) for all values of x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL round man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec round(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// A fractional part of 0.5 will round toward the nearest even - /// integer. (Both 3.5 and 4.5 for x will return 4.0.) - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL roundEven man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - /// @see New round to even technique - template - GLM_FUNC_DECL vec roundEven(vec const& x); - - /// Returns a value equal to the nearest integer - /// that is greater than or equal to x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL ceil man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec ceil(vec const& x); - - /// Return x - floor(x). - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL fract man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType fract(genType x); - - /// Return x - floor(x). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL fract man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec fract(vec const& x); - - template - GLM_FUNC_DECL genType mod(genType x, genType y); - - template - GLM_FUNC_DECL vec mod(vec const& x, T y); - - /// Modulus. Returns x - y * floor(x / y) - /// for each component in x using the floating point value y. 
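
The identity just above, mod(x, y) = x - y * floor(x / y), is a floored modulo: the result takes the sign of y, unlike C's std::fmod, which truncates and takes the sign of x. A short sketch assuming IEEE-754 doubles (floored_mod is a hypothetical helper written for this note, not a GLM function):

    #include <cmath>
    #include <cstdio>

    // Floored modulo per the formula above: x - y * floor(x / y)
    static double floored_mod(double x, double y)
    {
        return x - y * std::floor(x / y);
    }

    int main()
    {
        std::printf("%g\n", floored_mod(-1.0, 3.0)); // 2: sign follows y
        std::printf("%g\n", std::fmod(-1.0, 3.0));   // -1: sign follows x
        return 0;
    }
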
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types, include glm/gtc/integer for integer scalar types support - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL mod man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec mod(vec const& x, vec const& y); - - /// Returns the fractional part of x and sets i to the integer - /// part (as a whole number floating point value). Both the - /// return value and the output parameter will have the same - /// sign as x. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL modf man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType modf(genType x, genType& i); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, T y); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, vec const& y); - - /// Returns y if x < y; otherwise, it returns x. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y); - - /// Returns y if x < y; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, T y); - - /// Returns y if x < y; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. 
- /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal); - - /// If genTypeU is a floating scalar or vector: - /// Returns x * (1.0 - a) + y * a, i.e., the linear blend of - /// x and y using the floating-point value a. - /// The value for a is not restricted to the range [0, 1]. - /// - /// If genTypeU is a boolean scalar or vector: - /// Selects which vector each returned component comes - /// from. For a component of 'a' that is false, the - /// corresponding component of 'x' is returned. For a - /// component of 'a' that is true, the corresponding - /// component of 'y' is returned. Components of 'x' and 'y' that - /// are not selected are allowed to be invalid floating point - /// values and will have no effect on the results. Thus, this - /// provides different functionality than - /// genType mix(genType x, genType y, genType(a)) - /// where a is a Boolean vector. - /// - /// @see GLSL mix man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - /// - /// @param[in] x Value to interpolate. - /// @param[in] y Value to interpolate. - /// @param[in] a Interpolant. - /// - /// @tparam genTypeT Floating point scalar or vector. - /// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector if it is the length of genTypeT. - /// - /// @code - /// #include - /// ... - /// float a; - /// bool b; - /// glm::dvec3 e; - /// glm::dvec3 f; - /// glm::vec4 g; - /// glm::vec4 h; - /// ... - /// glm::vec4 r = glm::mix(g, h, a); // Interpolate with a floating-point scalar two vectors. - /// glm::vec4 s = glm::mix(g, h, b); // Returns g or h; - /// glm::dvec3 t = glm::mix(e, f, a); // Types of the third parameter is not required to match with the first and the second. - /// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be perform per component with a vector for the last parameter. - /// @endcode - template - GLM_FUNC_DECL genTypeT mix(genTypeT x, genTypeT y, genTypeU a); - - template - GLM_FUNC_DECL vec mix(vec const& x, vec const& y, vec const& a); - - template - GLM_FUNC_DECL vec mix(vec const& x, vec const& y, U a); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType. 
- /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType step(genType edge, genType x); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec step(T edge, vec const& x); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec step(vec const& edge, vec const& x); - - /// Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and - /// performs smooth Hermite interpolation between 0 and 1 - /// when edge0 < x < edge1. This is useful in cases where - /// you would want a threshold function with a smooth - /// transition. This is equivalent to: - /// genType t; - /// t = clamp ((x - edge0) / (edge1 - edge0), 0, 1); - /// return t * t * (3 - 2 * t); - /// Results are undefined if edge0 >= edge1. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL smoothstep man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x); - - template - GLM_FUNC_DECL vec smoothstep(T edge0, T edge1, vec const& x); - - template - GLM_FUNC_DECL vec smoothstep(vec const& edge0, vec const& edge1, vec const& x); - - /// Returns true if x holds a NaN (not a number) - /// representation in the underlying implementation's set of - /// floating point representations. Returns false otherwise, - /// including for implementations with no NaN - /// representations. - /// - /// /!\ When using compiler fast math, this function may fail. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL isnan man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec isnan(vec const& x); - - /// Returns true if x holds a positive infinity or negative - /// infinity representation in the underlying implementation's - /// set of floating point representations. Returns false - /// otherwise, including for implementations with no infinity - /// representations. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL isinf man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec isinf(vec const& x); - - /// Returns a signed integer value representing - /// the encoding of a floating-point value. The floating-point - /// value's bit-level representation is preserved. - /// - /// @see GLSL floatBitsToInt man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL int floatBitsToInt(float const& v); - - /// Returns a signed integer value representing - /// the encoding of a floating-point value. 
The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floatBitsToInt man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floatBitsToInt(vec const& v); - - /// Returns a unsigned integer value representing - /// the encoding of a floating-point value. The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @see GLSL floatBitsToUint man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL uint floatBitsToUint(float const& v); - - /// Returns a unsigned integer value representing - /// the encoding of a floating-point value. The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floatBitsToUint man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floatBitsToUint(vec const& v); - - /// Returns a floating-point value corresponding to a signed - /// integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @see GLSL intBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL float intBitsToFloat(int const& v); - - /// Returns a floating-point value corresponding to a signed - /// integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL intBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec intBitsToFloat(vec const& v); - - /// Returns a floating-point value corresponding to a - /// unsigned integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @see GLSL uintBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL float uintBitsToFloat(uint const& v); - - /// Returns a floating-point value corresponding to a - /// unsigned integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL uintBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec uintBitsToFloat(vec const& v); - - /// Computes and returns a * b + c. - /// - /// @tparam genType Floating-point scalar or vector types. 
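
The one-line contract above, a * b + c, hides a subtlety: fma in GLSL, like std::fma in C++, is typically a fused multiply-add, meaning the product is not rounded before the addition, so the result can differ from the plain expression a * b + c by one rounding. A small demonstration, assuming IEEE-754 doubles and a correctly rounded std::fma:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double x = 1.0 / 3.0;
        double p = x * x;                // product rounded to double
        double err = std::fma(x, x, -p); // x*x - p with a single rounding: the exact residual
        std::printf("%g\n", err);        // tiny but nonzero, so fma(a, b, c) != a*b + c in general
        return 0;
    }
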
- /// - /// @see GLSL fma man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c); - - /// Splits x into a floating-point significand in the range - /// [0.5, 1.0) and an integral exponent of two, such that: - /// x = significand * exp(2, exponent) - /// - /// The significand is returned by the function and the - /// exponent is returned in the parameter exp. For a - /// floating-point value of zero, the significant and exponent - /// are both zero. For a floating-point value that is an - /// infinity or is not a number, the results are undefined. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL frexp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType frexp(genType x, int& exp); - - template - GLM_FUNC_DECL vec frexp(vec const& v, vec& exp); - - /// Builds a floating-point number from x and the - /// corresponding integral exponent of two in exp, returning: - /// significand * exp(2, exponent) - /// - /// If this product is too large to be represented in the - /// floating-point type, the result is undefined. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL ldexp man page; - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp); - - template - GLM_FUNC_DECL vec ldexp(vec const& v, vec const& exp); - - /// @} -}//namespace glm - -#include "detail/func_common.inl" - diff --git a/third_party/glm/detail/_features.hpp b/third_party/glm/detail/_features.hpp deleted file mode 100755 index b0cbe9f..0000000 --- a/third_party/glm/detail/_features.hpp +++ /dev/null @@ -1,394 +0,0 @@ -#pragma once - -// #define GLM_CXX98_EXCEPTIONS -// #define GLM_CXX98_RTTI - -// #define GLM_CXX11_RVALUE_REFERENCES -// Rvalue references - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html - -// GLM_CXX11_TRAILING_RETURN -// Rvalue references for *this - GCC not supported -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm - -// GLM_CXX11_NONSTATIC_MEMBER_INIT -// Initialization of class objects by rvalues - GCC any -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html - -// GLM_CXX11_NONSTATIC_MEMBER_INIT -// Non-static data member initializers - GCC 4.7 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm - -// #define GLM_CXX11_VARIADIC_TEMPLATE -// Variadic templates - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf - -// -// Extending variadic template template parameters - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf - -// #define GLM_CXX11_GENERALIZED_INITIALIZERS -// Initializer lists - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm - -// #define GLM_CXX11_STATIC_ASSERT -// Static assertions - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html - -// #define GLM_CXX11_AUTO_TYPE -// auto-typed variables - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf - -// #define GLM_CXX11_AUTO_TYPE -// Multi-declarator auto - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf - -// #define GLM_CXX11_AUTO_TYPE -// Removal of auto as a storage-class specifier - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm - 
-// #define GLM_CXX11_AUTO_TYPE -// New function declarator syntax - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm - -// #define GLM_CXX11_LAMBDAS -// New wording for C++0x lambdas - GCC 4.5 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf - -// #define GLM_CXX11_DECLTYPE -// Declared type of an expression - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf - -// -// Right angle brackets - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html - -// -// Default template arguments for function templates DR226 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226 - -// -// Solving the SFINAE problem for expressions DR339 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html - -// #define GLM_CXX11_ALIAS_TEMPLATE -// Template aliases N2258 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf - -// -// Extern templates N1987 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm - -// #define GLM_CXX11_NULLPTR -// Null pointer constant N2431 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf - -// #define GLM_CXX11_STRONG_ENUMS -// Strongly-typed enums N2347 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf - -// -// Forward declarations for enums N2764 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf - -// -// Generalized attributes N2761 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf - -// -// Generalized constant expressions N2235 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf - -// -// Alignment support N2341 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf - -// #define GLM_CXX11_DELEGATING_CONSTRUCTORS -// Delegating constructors N1986 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf - -// -// Inheriting constructors N2540 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm - -// #define GLM_CXX11_EXPLICIT_CONVERSIONS -// Explicit conversion operators N2437 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf - -// -// New character types N2249 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html - -// -// Unicode string literals N2442 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm - -// -// Raw string literals N2442 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm - -// -// Universal character name literals N2170 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html - -// #define GLM_CXX11_USER_LITERALS -// User-defined literals N2765 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf - -// -// Standard Layout Types N2342 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm - -// #define GLM_CXX11_DEFAULTED_FUNCTIONS -// #define GLM_CXX11_DELETED_FUNCTIONS -// Defaulted and deleted functions N2346 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm - -// -// Extended friend declarations N1791 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf - -// -// Extending sizeof N2253 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html - -// #define GLM_CXX11_INLINE_NAMESPACES -// Inline namespaces N2535 GCC 4.4 
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm - -// #define GLM_CXX11_UNRESTRICTED_UNIONS -// Unrestricted unions N2544 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf - -// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS -// Local and unnamed types as template arguments N2657 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm - -// #define GLM_CXX11_RANGE_FOR -// Range-based for N2930 GCC 4.6 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html - -// #define GLM_CXX11_OVERRIDE_CONTROL -// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm - -// -// Minimal support for garbage collection and reachability-based leak detection N2670 No -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm - -// #define GLM_CXX11_NOEXCEPT -// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only) -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html - -// -// Defining move special member functions N3053 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html - -// -// Sequence points N2239 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html - -// -// Atomic operations N2427 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html - -// -// Strong Compare and Exchange N2748 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html - -// -// Bidirectional Fences N2752 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm - -// -// Memory model N2429 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm - -// -// Data-dependency ordering: atomics and memory model N2664 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm - -// -// Propagating exceptions N2179 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html - -// -// Abandoning a process and at_quick_exit N2440 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm - -// -// Allow atomics use in signal handlers N2547 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm - -// -// Thread-local storage N2659 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm - -// -// Dynamic initialization and destruction with concurrency N2660 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm - -// -// __func__ predefined identifier N2340 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm - -// -// C99 preprocessor N1653 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm - -// -// long long N1811 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf - -// -// Extended integral types N1988 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf - -#if(GLM_COMPILER & GLM_COMPILER_GCC) - -# define GLM_CXX11_STATIC_ASSERT - -#elif(GLM_COMPILER & GLM_COMPILER_CLANG) -# if(__has_feature(cxx_exceptions)) -# define GLM_CXX98_EXCEPTIONS -# endif - -# if(__has_feature(cxx_rtti)) -# define GLM_CXX98_RTTI -# endif - -# if(__has_feature(cxx_access_control_sfinae)) -# define GLM_CXX11_ACCESS_CONTROL_SFINAE -# endif - -# 
if(__has_feature(cxx_alias_templates)) -# define GLM_CXX11_ALIAS_TEMPLATE -# endif - -# if(__has_feature(cxx_alignas)) -# define GLM_CXX11_ALIGNAS -# endif - -# if(__has_feature(cxx_attributes)) -# define GLM_CXX11_ATTRIBUTES -# endif - -# if(__has_feature(cxx_constexpr)) -# define GLM_CXX11_CONSTEXPR -# endif - -# if(__has_feature(cxx_decltype)) -# define GLM_CXX11_DECLTYPE -# endif - -# if(__has_feature(cxx_default_function_template_args)) -# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS -# endif - -# if(__has_feature(cxx_defaulted_functions)) -# define GLM_CXX11_DEFAULTED_FUNCTIONS -# endif - -# if(__has_feature(cxx_delegating_constructors)) -# define GLM_CXX11_DELEGATING_CONSTRUCTORS -# endif - -# if(__has_feature(cxx_deleted_functions)) -# define GLM_CXX11_DELETED_FUNCTIONS -# endif - -# if(__has_feature(cxx_explicit_conversions)) -# define GLM_CXX11_EXPLICIT_CONVERSIONS -# endif - -# if(__has_feature(cxx_generalized_initializers)) -# define GLM_CXX11_GENERALIZED_INITIALIZERS -# endif - -# if(__has_feature(cxx_implicit_moves)) -# define GLM_CXX11_IMPLICIT_MOVES -# endif - -# if(__has_feature(cxx_inheriting_constructors)) -# define GLM_CXX11_INHERITING_CONSTRUCTORS -# endif - -# if(__has_feature(cxx_inline_namespaces)) -# define GLM_CXX11_INLINE_NAMESPACES -# endif - -# if(__has_feature(cxx_lambdas)) -# define GLM_CXX11_LAMBDAS -# endif - -# if(__has_feature(cxx_local_type_template_args)) -# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS -# endif - -# if(__has_feature(cxx_noexcept)) -# define GLM_CXX11_NOEXCEPT -# endif - -# if(__has_feature(cxx_nonstatic_member_init)) -# define GLM_CXX11_NONSTATIC_MEMBER_INIT -# endif - -# if(__has_feature(cxx_nullptr)) -# define GLM_CXX11_NULLPTR -# endif - -# if(__has_feature(cxx_override_control)) -# define GLM_CXX11_OVERRIDE_CONTROL -# endif - -# if(__has_feature(cxx_reference_qualified_functions)) -# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS -# endif - -# if(__has_feature(cxx_range_for)) -# define GLM_CXX11_RANGE_FOR -# endif - -# if(__has_feature(cxx_raw_string_literals)) -# define GLM_CXX11_RAW_STRING_LITERALS -# endif - -# if(__has_feature(cxx_rvalue_references)) -# define GLM_CXX11_RVALUE_REFERENCES -# endif - -# if(__has_feature(cxx_static_assert)) -# define GLM_CXX11_STATIC_ASSERT -# endif - -# if(__has_feature(cxx_auto_type)) -# define GLM_CXX11_AUTO_TYPE -# endif - -# if(__has_feature(cxx_strong_enums)) -# define GLM_CXX11_STRONG_ENUMS -# endif - -# if(__has_feature(cxx_trailing_return)) -# define GLM_CXX11_TRAILING_RETURN -# endif - -# if(__has_feature(cxx_unicode_literals)) -# define GLM_CXX11_UNICODE_LITERALS -# endif - -# if(__has_feature(cxx_unrestricted_unions)) -# define GLM_CXX11_UNRESTRICTED_UNIONS -# endif - -# if(__has_feature(cxx_user_literals)) -# define GLM_CXX11_USER_LITERALS -# endif - -# if(__has_feature(cxx_variadic_templates)) -# define GLM_CXX11_VARIADIC_TEMPLATES -# endif - -#endif//(GLM_COMPILER & GLM_COMPILER_CLANG) diff --git a/third_party/glm/detail/_fixes.hpp b/third_party/glm/detail/_fixes.hpp deleted file mode 100755 index a503c7c..0000000 --- a/third_party/glm/detail/_fixes.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#include - -//! Workaround for compatibility with other libraries -#ifdef max -#undef max -#endif - -//! Workaround for compatibility with other libraries -#ifdef min -#undef min -#endif - -//! Workaround for Android -#ifdef isnan -#undef isnan -#endif - -//! Workaround for Android -#ifdef isinf -#undef isinf -#endif - -//! 
Workaround for Chrone Native Client -#ifdef log2 -#undef log2 -#endif - diff --git a/third_party/glm/detail/_noise.hpp b/third_party/glm/detail/_noise.hpp deleted file mode 100755 index 5a874a0..0000000 --- a/third_party/glm/detail/_noise.hpp +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once - -#include "../common.hpp" - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER T mod289(T const& x) - { - return x - floor(x * (static_cast(1.0) / static_cast(289.0))) * static_cast(289.0); - } - - template - GLM_FUNC_QUALIFIER T permute(T const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } -}//namespace detail -}//namespace glm - diff --git a/third_party/glm/detail/_swizzle.hpp b/third_party/glm/detail/_swizzle.hpp deleted file mode 100755 index 87896ef..0000000 --- a/third_party/glm/detail/_swizzle.hpp +++ /dev/null @@ -1,804 +0,0 @@ -#pragma once - -namespace glm{ -namespace detail -{ - // Internal class for implementing swizzle operators - template - struct _swizzle_base0 - { - protected: - GLM_FUNC_QUALIFIER T& elem(size_t i){ return (reinterpret_cast(_buffer))[i]; } - GLM_FUNC_QUALIFIER T const& elem(size_t i) const{ return (reinterpret_cast(_buffer))[i]; } - - // Use an opaque buffer to *ensure* the compiler doesn't call a constructor. 
- // The size 1 buffer is assumed to aligned to the actual members so that the - // elem() - char _buffer[1]; - }; - - template - struct _swizzle_base1 : public _swizzle_base0 - { - }; - - template - struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); } - }; - - template - struct _swizzle_base1<3, T, Q, E0,E1,E2,-1, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); } - }; - - template - struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); } - }; - - // Internal class for implementing swizzle operators - /* - Template parameters: - - T = type of scalar values (e.g. float, double) - N = number of components in the vector (e.g. 3) - E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec - - DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles - containing duplicate elements so that they cannot be used as r-values). - */ - template - struct _swizzle_base2 : public _swizzle_base1::value> - { - struct op_equal - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; } - }; - - struct op_minus - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; } - }; - - struct op_plus - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; } - }; - - struct op_mul - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; } - }; - - struct op_div - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; } - }; - - public: - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t) - { - for (int i = 0; i < N; ++i) - (*this)[i] = t; - return *this; - } - - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec const& that) - { - _apply_op(that, op_equal()); - return *this; - } - - GLM_FUNC_QUALIFIER void operator -= (vec const& that) - { - _apply_op(that, op_minus()); - } - - GLM_FUNC_QUALIFIER void operator += (vec const& that) - { - _apply_op(that, op_plus()); - } - - GLM_FUNC_QUALIFIER void operator *= (vec const& that) - { - _apply_op(that, op_mul()); - } - - GLM_FUNC_QUALIFIER void operator /= (vec const& that) - { - _apply_op(that, op_div()); - } - - GLM_FUNC_QUALIFIER T& operator[](size_t i) - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - GLM_FUNC_QUALIFIER T operator[](size_t i) const - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - - protected: - template - GLM_FUNC_QUALIFIER void _apply_op(vec const& that, const U& op) - { - // Make a copy of the data in this == &that. - // The copier should optimize out the copy in cases where the function is - // properly inlined and the copy is not necessary. - T t[N]; - for (int i = 0; i < N; ++i) - t[i] = that[i]; - for (int i = 0; i < N; ++i) - op( (*this)[i], t[i] ); - } - }; - - // Specialization for swizzles containing duplicate elements. These cannot be modified. 
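
The rule stated above follows from aliasing: a swizzle such as v.xx names the same component twice, so a write through it would have to store two possibly different values into one slot. The specialization that follows simply omits operator=(vec), which is enough to reject such writes at compile time. A standalone sketch of that mechanism (SwizzleXY and SwizzleXX are illustrative stand-ins, not GLM's types):

    #include <cstdio>

    struct Vec2 { float x, y; };

    struct SwizzleXY // distinct elements: assignable
    {
        float* base;
        SwizzleXY& operator=(const Vec2& rhs) { base[0] = rhs.x; base[1] = rhs.y; return *this; }
    };

    struct SwizzleXX // repeated element: read-only, no operator=(Vec2) is provided
    {
        const float* base;
        Vec2 value() const { return Vec2{ base[0], base[0] }; }
    };

    int main()
    {
        float data[2] = { 1.0f, 2.0f };
        SwizzleXY xy{ data };
        xy = Vec2{ 3.0f, 4.0f };        // fine: each component written exactly once
        SwizzleXX xx{ data };
        Vec2 v = xx.value();            // fine: reading a duplicate is well-defined
        std::printf("%g %g %g\n", data[0], data[1], v.y);
        // xx = Vec2{ 5.0f, 6.0f };     // no such operator=: which value should data[0] take?
        return 0;
    }
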
- template - struct _swizzle_base2 : public _swizzle_base1::value> - { - struct Stub {}; - - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; } - - GLM_FUNC_QUALIFIER T operator[] (size_t i) const - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - }; - - template - struct _swizzle : public _swizzle_base2 - { - typedef _swizzle_base2 base_type; - - using base_type::operator=; - - GLM_FUNC_QUALIFIER operator vec () const { return (*this)(); } - }; - -// -// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros -// -#define GLM_SWIZZLE_TEMPLATE1 template -#define GLM_SWIZZLE_TEMPLATE2 template -#define GLM_SWIZZLE_TYPE1 _swizzle -#define GLM_SWIZZLE_TYPE2 _swizzle - -// -// Wrapper for a binary operator (e.g. u.yy + v.zy) -// -#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ - { \ - return a() OPERAND b(); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec& b) \ - { \ - return a() OPERAND b; \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const vec& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return a OPERAND b(); \ - } - -// -// Wrapper for a operand between a swizzle and a binary (e.g. 1.0f - u.xyz) -// -#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b) \ - { \ - return a() OPERAND b; \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return a OPERAND b(); \ - } - -// -// Macro for wrapping a function taking one argument (e.g. abs()) -// -#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a) \ - { \ - return FUNCTION(a()); \ - } - -// -// Macro for wrapping a function taking two vector arguments (e.g. dot()). -// -#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ - { \ - return FUNCTION(a(), b()); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return FUNCTION(a(), b()); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b) \ - { \ - return FUNCTION(a(), b); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return FUNCTION(a, b()); \ - } - -// -// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()). 
-// -#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \ - { \ - return FUNCTION(a(), b(), c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ - { \ - return FUNCTION(a(), b(), c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\ - { \ - return FUNCTION(a(), b, c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ - { \ - return FUNCTION(a, b(), c); \ - } - -}//namespace detail -}//namespace glm - -namespace glm -{ - namespace detail - { - GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-) - GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/) - } - - // - // Swizzles are distinct types from the unswizzled type. The below macros will - // provide template specializations for the swizzle types for the given functions - // so that the compiler does not have any ambiguity to choosing how to handle - // the function. - // - // The alternative is to use the operator()() when calling the function in order - // to explicitly convert the swizzled type to the unswizzled type. - // - - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any); - - //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot); - //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross); - //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step); - //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix); -} - -#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \ - struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \ - struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \ - struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; }; - -#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; - -#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 
## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; - -#define GLM_SWIZZLE3_2_MEMBERS(T, Q, E0,E1,E2) \ - struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; - -#define GLM_SWIZZLE3_3_MEMBERS(T, Q ,E0,E1,E2) \ - struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; - -#define GLM_SWIZZLE3_4_MEMBERS(T, Q, E0,E1,E2) \ - struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## 
E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ - 
struct { detail::_swizzle<4,T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; - -#define GLM_SWIZZLE4_2_MEMBERS(T, Q, E0,E1,E2,E3) \ - struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 0,3,-1,-2> E0 ## E3; }; \ - struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 1,3,-1,-2> E1 ## E3; }; \ - struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 2,3,-1,-2> E2 ## E3; }; \ - struct { detail::_swizzle<2,T, Q, 3,0,-1,-2> E3 ## E0; }; \ - struct { detail::_swizzle<2,T, Q, 3,1,-1,-2> E3 ## E1; }; \ - struct { detail::_swizzle<2,T, Q, 3,2,-1,-2> E3 ## E2; }; \ - struct { detail::_swizzle<2,T, Q, 3,3,-1,-2> E3 ## E3; }; - -#define GLM_SWIZZLE4_3_MEMBERS(T, Q, E0,E1,E2,E3) \ - struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,0,3,-1> E0 ## E0 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, 
Q, 0,1,2,-1> E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,1,3,-1> E0 ## E1 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,2,3,-1> E0 ## E2 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 0,3,0,-1> E0 ## E3 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 0,3,1,-1> E0 ## E3 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 0,3,2,-1> E0 ## E3 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 0,3,3,-1> E0 ## E3 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,0,3,-1> E1 ## E0 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,1,3,-1> E1 ## E1 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,2,3,-1> E1 ## E2 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 1,3,0,-1> E1 ## E3 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 1,3,1,-1> E1 ## E3 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 1,3,2,-1> E1 ## E3 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 1,3,3,-1> E1 ## E3 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,0,3,-1> E2 ## E0 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,1,3,-1> E2 ## E1 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,2,3,-1> E2 ## E2 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 2,3,0,-1> E2 ## E3 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 2,3,1,-1> E2 ## E3 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 2,3,2,-1> E2 ## E3 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 2,3,3,-1> E2 ## E3 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 3,0,0,-1> E3 ## E0 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 3,0,1,-1> E3 ## E0 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 3,0,2,-1> E3 ## E0 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 3,0,3,-1> E3 ## E0 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 3,1,0,-1> E3 ## E1 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 3,1,1,-1> E3 ## E1 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 3,1,2,-1> E3 ## E1 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 3,1,3,-1> E3 ## E1 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 3,2,0,-1> E3 ## E2 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 3,2,1,-1> E3 ## E2 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 3,2,2,-1> E3 ## 
E2 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 3,2,3,-1> E3 ## E2 ## E3; }; \ - struct { detail::_swizzle<3, T, Q, 3,3,0,-1> E3 ## E3 ## E0; }; \ - struct { detail::_swizzle<3, T, Q, 3,3,1,-1> E3 ## E3 ## E1; }; \ - struct { detail::_swizzle<3, T, Q, 3,3,2,-1> E3 ## E3 ## E2; }; \ - struct { detail::_swizzle<3, T, Q, 3,3,3,-1> E3 ## E3 ## E3; }; - -#define GLM_SWIZZLE4_4_MEMBERS(T, Q, E0,E1,E2,E3) \ - struct { detail::_swizzle<4, T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ - 
struct { detail::_swizzle<4, T, Q, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,2,3> E1 ## 
E1 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,3,0> E1 ## E1 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,1,0> E1 ## E3 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \ - struct { 
detail::_swizzle<4, T, Q, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,3,0> E2 ## E3 ## E3 
## E0; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,3,1> E2 ## E3 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \ - struct { detail::_swizzle<4, T, Q, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \ - struct { detail::_swizzle<4, T, Q, 
3,2,3,1> E3 ## E2 ## E3 ## E1; }; \
-	struct { detail::_swizzle<4, T, Q, 3,2,3,2> E3 ## E2 ## E3 ## E2; }; \
-	struct { detail::_swizzle<4, T, Q, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \
-	struct { detail::_swizzle<4, T, Q, 3,3,3,3> E3 ## E3 ## E3 ## E3; };
diff --git a/third_party/glm/detail/_swizzle_func.hpp b/third_party/glm/detail/_swizzle_func.hpp
deleted file mode 100755
index d93c6af..0000000
--- a/third_party/glm/detail/_swizzle_func.hpp
+++ /dev/null
@@ -1,682 +0,0 @@
-#pragma once
-
-#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B) \
-	vec<2, T, Q> A ## B() CONST \
-	{ \
-		return vec<2, T, Q>(this->A, this->B); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C) \
-	vec<3, T, Q> A ## B ## C() CONST \
-	{ \
-		return vec<3, T, Q>(this->A, this->B, this->C); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D) \
-	vec<4, T, Q> A ## B ## C ## D() CONST \
-	{ \
-		return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, P, L, CONST, A, B) \
-	template<typename T> \
-	vec<2, T, Q> vec<L, T, Q>::A ## B() CONST \
-	{ \
-		return vec<2, T, Q>(this->A, this->B); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, P, L, CONST, A, B, C) \
-	template<typename T> \
-	vec<3, T, Q> vec<L, T, Q>::A ## B ## C() CONST \
-	{ \
-		return vec<3, T, Q>(this->A, this->B, this->C); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, P, L, CONST, A, B, C, D) \
-	template<typename T> \
-	vec<4, T, Q> vec<L, T, Q>::A ## B ## C ## D() CONST \
-	{ \
-		return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
-	}
-
-#define GLM_MUTABLE
-
-#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, A, B) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, A, B) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, B, A)
-
-#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(T, P) \
-	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, x, y) \
-	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, r, g) \
-	GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, s, t)
-
-#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \
-	GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B)
-
-#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
-	GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, B,
C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, B, A) - -#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) - -#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(T, P) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, x, y, z) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, r, g, b) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, s, t, p) - -#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, C) - -#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, B) - -#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, B, A) \ - 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, C, A) - -#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) - -#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(T, P) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, x, y, z, w) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, r, g, b, a) \ - GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, s, t, p, q) - -#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) - -#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) - -#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ - GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, P) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, x, y) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, r, g) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, s, t) - -#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) - -#define 
GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) - -#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, 
B, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, P) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, x, y, z) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, r, g, b) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, s, t, p) - -#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ - 
GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, D) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, A) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, B) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, C) \ - GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, D) - -#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, D) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, D) \ - 
GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, A) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, B) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, C) \ - GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, D) - -#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, B) \ - 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, A) \ - 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, D) \ - 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, C) \ - 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, D) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, A) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, B) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, C) \ - GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, D) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ - GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) - -#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, P) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, x, y, z, w) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, r, g, b, a) \ - GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, s, t, p, q) - diff --git a/third_party/glm/detail/_vectorize.hpp b/third_party/glm/detail/_vectorize.hpp deleted file mode 100755 index 1fcaec3..0000000 --- a/third_party/glm/detail/_vectorize.hpp +++ /dev/null @@ -1,162 +0,0 @@ -#pragma once - -namespace glm{ -namespace detail -{ - template class vec, length_t L, typename R, typename T, qualifier Q> - struct functor1{}; - - template class vec, typename R, typename T, qualifier Q> - struct functor1 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v) - { - return vec<1, R, Q>(Func(v.x)); - } - }; - - template class vec, typename R, typename T, qualifier Q> - struct functor1 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v) - { - return vec<2, R, Q>(Func(v.x), Func(v.y)); - } - }; - - template class vec, typename R, typename T, qualifier Q> - struct functor1 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v) - { - return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z)); - } - }; - - template class vec, typename R, typename T, qualifier Q> - struct functor1 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v) - { - return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w)); - } - }; - - template class vec, length_t L, typename T, qualifier Q> - struct functor2{}; - - template class vec, typename T, qualifier Q> - struct functor2 - { - GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, vec<1, T, Q> const& b) - { - return vec<1, T, Q>(Func(a.x, b.x)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2 - { - GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, vec<2, T, Q> const& b) - { - return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2 - { - GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2 - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); - } - }; - - template class vec, length_t L, typename T, qualifier Q> - struct functor2_vec_sca{}; - - template class vec, 
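For context: the swizzle table that closes above has to enumerate every component combination by hand because the C preprocessor cannot loop — the vec4-from-vec4 block alone is 4^4 = 256 entries, repeated for each naming set (x/y/z/w, r/g/b/a, s/t/p/q) by `GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP`. As a rough illustration of what one generated entry provides — a hypothetical miniature, not GLM's actual `_swizzle` proxy type, which additionally supports writing through the swizzle:

    #include <iostream>

    struct Vec4
    {
        float x, y, z, w;

        // Roughly what an entry like GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, w, z, y, x)
        // expands toward: a const accessor returning the reordered components.
        Vec4 wzyx() const { return Vec4{w, z, y, x}; }
    };

    int main()
    {
        Vec4 v{1.f, 2.f, 3.f, 4.f};
        Vec4 s = v.wzyx();
        std::cout << s.x << ' ' << s.y << ' ' << s.z << ' ' << s.w << '\n'; // 4 3 2 1
    }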
typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, T b) - { - return vec<1, T, Q>(Func(a.x, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, T b) - { - return vec<2, T, Q>(Func(a.x, b), Func(a.y, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, T b) - { - return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b) - { - return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b)); - } - }; - - template - struct functor2_vec_int {}; - - template - struct functor2_vec_int<1, T, Q> - { - GLM_FUNC_QUALIFIER static vec<1, int, Q> call(int (*Func) (T x, int y), vec<1, T, Q> const& a, vec<1, int, Q> const& b) - { - return vec<1, int, Q>(Func(a.x, b.x)); - } - }; - - template - struct functor2_vec_int<2, T, Q> - { - GLM_FUNC_QUALIFIER static vec<2, int, Q> call(int (*Func) (T x, int y), vec<2, T, Q> const& a, vec<2, int, Q> const& b) - { - return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y)); - } - }; - - template - struct functor2_vec_int<3, T, Q> - { - GLM_FUNC_QUALIFIER static vec<3, int, Q> call(int (*Func) (T x, int y), vec<3, T, Q> const& a, vec<3, int, Q> const& b) - { - return vec<3, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); - } - }; - - template - struct functor2_vec_int<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b) - { - return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/compute_common.hpp b/third_party/glm/detail/compute_common.hpp deleted file mode 100755 index cc24b9e..0000000 --- a/third_party/glm/detail/compute_common.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include "setup.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_abs - {}; - - template - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || std::numeric_limits::is_signed, - "'abs' only accept floating-point and integer scalar or vector inputs"); - - return x >= genFIType(0) ? 
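For context: the `functor1`/`functor2` family deleted above (from `_vectorize.hpp`) lifts a scalar function pointer to a component-wise vector function, with one specialization per vector length so every call site is fully unrolled. A minimal sketch of the same dispatch idea over a generic fixed-size array, assuming a plain loop in place of the per-length specializations:

    #include <array>
    #include <cmath>
    #include <cstddef>
    #include <iostream>

    // Toy stand-in for glm::detail::functor1: apply R(*)(T) to each component.
    template <std::size_t L, typename R, typename T>
    std::array<R, L> lift(R (*func)(T), std::array<T, L> const& v)
    {
        std::array<R, L> out{};
        for (std::size_t i = 0; i < L; ++i) // GLM unrolls this via its specializations
            out[i] = func(v[i]);
        return out;
    }

    float my_sqrt(float x) { return std::sqrt(x); } // taking &std::sqrt directly is not portable

    int main()
    {
        std::array<float, 4> v{1.f, 4.f, 9.f, 16.f};
        auto r = lift<4, float, float>(my_sqrt, v);
        std::cout << r[0] << ' ' << r[1] << ' ' << r[2] << ' ' << r[3] << '\n'; // 1 2 3 4
    }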
x : -x; - // TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff; - } - }; - -#if GLM_COMPILER & GLM_COMPILER_CUDA - template<> - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static float call(float x) - { - return fabsf(x); - } - }; -#endif - - template - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) - { - GLM_STATIC_ASSERT( - (!std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'abs' only accept floating-point and integer scalar or vector inputs"); - return x; - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/compute_vector_relational.hpp b/third_party/glm/detail/compute_vector_relational.hpp deleted file mode 100755 index 167b634..0000000 --- a/third_party/glm/detail/compute_vector_relational.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -//#include "compute_common.hpp" -#include "setup.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) - { - return a == b; - } - }; -/* - template - struct compute_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) - { - return detail::compute_abs::is_signed>::call(b - a) <= static_cast(0); - //return std::memcmp(&a, &b, sizeof(T)) == 0; - } - }; -*/ -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/func_common.inl b/third_party/glm/detail/func_common.inl deleted file mode 100755 index 4b5f144..0000000 --- a/third_party/glm/detail/func_common.inl +++ /dev/null @@ -1,792 +0,0 @@ -/// @ref core -/// @file glm/detail/func_common.inl - -#include "../vector_relational.hpp" -#include "compute_common.hpp" -#include "type_vec1.hpp" -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include "_vectorize.hpp" -#include - -namespace glm -{ - // min - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType min(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); - return (y < x) ? y : x; - } - - // max - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType max(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); - - return (x < y) ? y : x; - } - - // abs - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR int abs(int x) - { - int const y = x >> (sizeof(int) * 8 - 1); - return (x ^ y) - y; - } - - // round -# if GLM_HAS_CXX11_STL - using ::std::round; -# else - template - GLM_FUNC_QUALIFIER genType round(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'round' only accept floating-point inputs"); - - return x < static_cast(0) ? static_cast(int(x - static_cast(0.5))) : static_cast(int(x + static_cast(0.5))); - } -# endif - - // trunc -# if GLM_HAS_CXX11_STL - using ::std::trunc; -# else - template - GLM_FUNC_QUALIFIER genType trunc(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'trunc' only accept floating-point inputs"); - - return x < static_cast(0) ? 
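Worth noting how the `abs(int)` specialization above avoids a branch: `y = x >> (sizeof(int) * 8 - 1)` arithmetic-shifts the sign bit across the whole word, yielding 0 for non-negative `x` and -1 (all ones) for negative `x`, so `(x ^ y) - y` evaluates to `x` unchanged or to `~x + 1 == -x` respectively. A self-checking sketch — two's complement is only guaranteed since C++20, and right-shifting a negative value was implementation-defined before that, so this is a de-facto idiom rather than strictly portable C++:

    #include <cassert>

    int abs_branchless(int x)
    {
        int const y = x >> (sizeof(int) * 8 - 1); // 0 if x >= 0, -1 (all ones) if x < 0
        return (x ^ y) - y;                       // x, or ~x + 1 == -x
    }

    int main()
    {
        assert(abs_branchless(5) == 5);
        assert(abs_branchless(-5) == 5);
        assert(abs_branchless(0) == 0);
    }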
-std::floor(-x) : std::floor(x); - } -# endif - -}//namespace glm - -namespace glm{ -namespace detail -{ - template - struct compute_abs_vector - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) - { - return detail::functor1::call(abs, x); - } - }; - - template - struct compute_mix_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); - } - }; - - template - struct compute_mix_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) - { - vec Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = a[i] ? y[i] : x[i]; - return Result; - } - }; - - template - struct compute_mix_scalar - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, U const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); - } - }; - - template - struct compute_mix_scalar - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, bool const& a) - { - return a ? y : x; - } - }; - - template - struct compute_mix - { - GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, U const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return static_cast(static_cast(x) * (static_cast(1) - a) + static_cast(y) * a); - } - }; - - template - struct compute_mix - { - GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, bool const& a) - { - return a ? y : x; - } - }; - - template - struct compute_sign - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return vec(glm::lessThan(vec(0), x)) - vec(glm::lessThan(x, vec(0))); - } - }; - -# if GLM_ARCH == GLM_ARCH_X86 - template - struct compute_sign - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - T const Shift(static_cast(sizeof(T) * 8 - 1)); - vec const y(vec::type, Q>(-x) >> typename detail::make_unsigned::type(Shift)); - - return (x >> Shift) | y; - } - }; -# endif - - template - struct compute_floor - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::floor, x); - } - }; - - template - struct compute_ceil - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::ceil, x); - } - }; - - template - struct compute_fract - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return x - floor(x); - } - }; - - template - struct compute_trunc - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(trunc, x); - } - }; - - template - struct compute_round - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(round, x); - } - }; - - template - struct compute_mod - { - GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mod' only accept floating-point inputs. 
Include for integer inputs."); - return a - b * floor(a / b); - } - }; - - template - struct compute_min_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) - { - return detail::functor2::call(min, x, y); - } - }; - - template - struct compute_max_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) - { - return detail::functor2::call(max, x, y); - } - }; - - template - struct compute_clamp_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& minVal, vec const& maxVal) - { - return min(max(x, minVal), maxVal); - } - }; - - template - struct compute_step_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& edge, vec const& x) - { - return mix(vec(1), vec(0), glm::lessThan(x, edge)); - } - }; - - template - struct compute_smoothstep_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& edge0, vec const& edge1, vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); - vec const tmp(clamp((x - edge0) / (edge1 - edge0), static_cast(0), static_cast(1))); - return tmp * tmp * (static_cast(3) - static_cast(2) * tmp); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType abs(genFIType x) - { - return detail::compute_abs::is_signed>::call(x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec abs(vec const& x) - { - return detail::compute_abs_vector::value>::call(x); - } - - // sign - // fast and works for any type - template - GLM_FUNC_QUALIFIER genFIType sign(genFIType x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'sign' only accept signed inputs"); - - return detail::compute_sign<1, genFIType, defaultp, - std::numeric_limits::is_iec559, detail::is_aligned::value>::call(vec<1, genFIType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec sign(vec const& x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'sign' only accept signed inputs"); - - return detail::compute_sign::is_iec559, detail::is_aligned::value>::call(x); - } - - // floor - using ::std::floor; - template - GLM_FUNC_QUALIFIER vec floor(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'floor' only accept floating-point inputs."); - return detail::compute_floor::value>::call(x); - } - - template - GLM_FUNC_QUALIFIER vec trunc(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'trunc' only accept floating-point inputs"); - return detail::compute_trunc::value>::call(x); - } - - template - GLM_FUNC_QUALIFIER vec round(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'round' only accept floating-point inputs"); - return detail::compute_round::value>::call(x); - } - -/* - // roundEven - template - GLM_FUNC_QUALIFIER genType roundEven(genType const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - - return genType(int(x + genType(int(x) % 2))); - } -*/ - - // roundEven - template - GLM_FUNC_QUALIFIER genType roundEven(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - - int Integer = static_cast(x); - genType IntegerPart = static_cast(Integer); - genType FractionalPart = fract(x); - - if(FractionalPart > static_cast(0.5) || FractionalPart < static_cast(0.5)) - { - return 
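A semantic note on `a - b * floor(a / b)` above: GLSL `mod` is a floored modulo, so the result carries the sign of the divisor, whereas C's `std::fmod` truncates toward zero and keeps the sign of the dividend. A quick comparison:

    #include <cmath>
    #include <iostream>

    // GLSL-style mod, as in the deleted compute_mod.
    float glsl_mod(float x, float y) { return x - y * std::floor(x / y); }

    int main()
    {
        std::cout << glsl_mod(-1.f, 3.f) << '\n';  // 2   (floored division)
        std::cout << std::fmod(-1.f, 3.f) << '\n'; // -1  (truncated division)
    }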
round(x); - } - else if((Integer % 2) == 0) - { - return IntegerPart; - } - else if(x <= static_cast(0)) // Work around... - { - return IntegerPart - static_cast(1); - } - else - { - return IntegerPart + static_cast(1); - } - //else // Bug on MinGW 4.5.2 - //{ - // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0)); - //} - } - - template - GLM_FUNC_QUALIFIER vec roundEven(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - return detail::functor1::call(roundEven, x); - } - - // ceil - using ::std::ceil; - template - GLM_FUNC_QUALIFIER vec ceil(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ceil' only accept floating-point inputs"); - return detail::compute_ceil::value>::call(x); - } - - // fract - template - GLM_FUNC_QUALIFIER genType fract(genType x) - { - return fract(vec<1, genType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec fract(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fract' only accept floating-point inputs"); - return detail::compute_fract::value>::call(x); - } - - // mod - template - GLM_FUNC_QUALIFIER genType mod(genType x, genType y) - { -# if GLM_COMPILER & GLM_COMPILER_CUDA - // Another Cuda compiler bug https://github.com/g-truc/glm/issues/530 - vec<1, genType, defaultp> Result(mod(vec<1, genType, defaultp>(x), y)); - return Result.x; -# else - return mod(vec<1, genType, defaultp>(x), y).x; -# endif - } - - template - GLM_FUNC_QUALIFIER vec mod(vec const& x, T y) - { - return detail::compute_mod::value>::call(x, vec(y)); - } - - template - GLM_FUNC_QUALIFIER vec mod(vec const& x, vec const& y) - { - return detail::compute_mod::value>::call(x, y); - } - - // modf - template - GLM_FUNC_QUALIFIER genType modf(genType x, genType & i) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'modf' only accept floating-point inputs"); - return std::modf(x, &i); - } - - template - GLM_FUNC_QUALIFIER vec<1, T, Q> modf(vec<1, T, Q> const& x, vec<1, T, Q> & i) - { - return vec<1, T, Q>( - modf(x.x, i.x)); - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> modf(vec<2, T, Q> const& x, vec<2, T, Q> & i) - { - return vec<2, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> modf(vec<3, T, Q> const& x, vec<3, T, Q> & i) - { - return vec<3, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y), - modf(x.z, i.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> modf(vec<4, T, Q> const& x, vec<4, T, Q> & i) - { - return vec<4, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y), - modf(x.z, i.z), - modf(x.w, i.w)); - } - - //// Only valid if (INT_MIN <= x-y <= INT_MAX) - //// min(x,y) - //r = y + ((x - y) & ((x - y) >> (sizeof(int) * - //CHAR_BIT - 1))); - //// max(x,y) - //r = x - ((x - y) & ((x - y) >> (sizeof(int) * - //CHAR_BIT - 1))); - - // min - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); - return detail::compute_min_vector::value>::call(a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, vec const& b) - { - return detail::compute_min_vector::value>::call(a, b); - } - - // max - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); - 
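`roundEven` above is round-half-to-even (banker's rounding): anything whose fractional part is not exactly .5 rounds normally, and exact ties go to the nearest even integer, which cancels the upward drift of always rounding halves away from zero. The standard library exposes the same tie-breaking through `std::nearbyint` under the default FE_TONEAREST rounding mode:

    #include <cfenv>
    #include <cmath>
    #include <iostream>

    int main()
    {
        std::fesetround(FE_TONEAREST);             // the default mode: ties to even
        std::cout << std::nearbyint(0.5)  << '\n'; // 0
        std::cout << std::nearbyint(1.5)  << '\n'; // 2
        std::cout << std::nearbyint(2.5)  << '\n'; // 2
        std::cout << std::nearbyint(-1.5) << '\n'; // -2
    }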
return detail::compute_max_vector::value>::call(a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, vec const& b) - { - return detail::compute_max_vector::value>::call(a, b); - } - - // clamp - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return min(max(x, minVal), maxVal); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return detail::compute_clamp_vector::value>::call(x, vec(minVal), vec(maxVal)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return detail::compute_clamp_vector::value>::call(x, minVal, maxVal); - } - - template - GLM_FUNC_QUALIFIER genTypeT mix(genTypeT x, genTypeT y, genTypeU a) - { - return detail::compute_mix::call(x, y, a); - } - - template - GLM_FUNC_QUALIFIER vec mix(vec const& x, vec const& y, U a) - { - return detail::compute_mix_scalar::value>::call(x, y, a); - } - - template - GLM_FUNC_QUALIFIER vec mix(vec const& x, vec const& y, vec const& a) - { - return detail::compute_mix_vector::value>::call(x, y, a); - } - - // step - template - GLM_FUNC_QUALIFIER genType step(genType edge, genType x) - { - return mix(static_cast(1), static_cast(0), x < edge); - } - - template - GLM_FUNC_QUALIFIER vec step(T edge, vec const& x) - { - return detail::compute_step_vector::value>::call(vec(edge), x); - } - - template - GLM_FUNC_QUALIFIER vec step(vec const& edge, vec const& x) - { - return detail::compute_step_vector::value>::call(edge, x); - } - - // smoothstep - template - GLM_FUNC_QUALIFIER genType smoothstep(genType edge0, genType edge1, genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); - - genType const tmp(clamp((x - edge0) / (edge1 - edge0), genType(0), genType(1))); - return tmp * tmp * (genType(3) - genType(2) * tmp); - } - - template - GLM_FUNC_QUALIFIER vec smoothstep(T edge0, T edge1, vec const& x) - { - return detail::compute_smoothstep_vector::value>::call(vec(edge0), vec(edge1), x); - } - - template - GLM_FUNC_QUALIFIER vec smoothstep(vec const& edge0, vec const& edge1, vec const& x) - { - return detail::compute_smoothstep_vector::value>::call(edge0, edge1, x); - } - -# if GLM_HAS_CXX11_STL - using std::isnan; -# else - template - GLM_FUNC_QUALIFIER bool isnan(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isnan' only accept floating-point inputs"); - -# if GLM_HAS_CXX11_STL - return std::isnan(x); -# elif GLM_COMPILER & GLM_COMPILER_VC - return _isnan(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_INTEL -# if GLM_PLATFORM & GLM_PLATFORM_WINDOWS - return _isnan(x) != 0; -# else - return ::isnan(x) != 0; -# endif -# elif (GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)) && (GLM_PLATFORM & GLM_PLATFORM_ANDROID) && __cplusplus < 201103L - return _isnan(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_CUDA - return ::isnan(x) != 0; -# else - return std::isnan(x); -# endif - } -# endif - - template 
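`smoothstep` above is cubic Hermite interpolation: with t = clamp((x - edge0) / (edge1 - edge0), 0, 1), the polynomial t*t*(3 - 2t) = 3t^2 - 2t^3 rises from 0 to 1 and its derivative 6t - 6t^2 vanishes at both ends, which is what makes the edge transitions visually smooth. Standalone scalar version:

    #include <algorithm> // std::clamp (C++17)
    #include <iostream>

    float smoothstep(float edge0, float edge1, float x)
    {
        float const t = std::clamp((x - edge0) / (edge1 - edge0), 0.0f, 1.0f);
        return t * t * (3.0f - 2.0f * t); // Hermite polynomial 3t^2 - 2t^3
    }

    int main()
    {
        std::cout << smoothstep(0.f, 1.f, 0.25f) << '\n'; // 0.15625
        std::cout << smoothstep(0.f, 1.f, 0.5f)  << '\n'; // 0.5
    }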
- GLM_FUNC_QUALIFIER vec isnan(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isnan' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = glm::isnan(v[l]); - return Result; - } - -# if GLM_HAS_CXX11_STL - using std::isinf; -# else - template - GLM_FUNC_QUALIFIER bool isinf(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isinf' only accept floating-point inputs"); - -# if GLM_HAS_CXX11_STL - return std::isinf(x); -# elif GLM_COMPILER & (GLM_COMPILER_INTEL | GLM_COMPILER_VC) -# if(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) - return _fpclass(x) == _FPCLASS_NINF || _fpclass(x) == _FPCLASS_PINF; -# else - return ::isinf(x); -# endif -# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) -# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID && __cplusplus < 201103L) - return _isinf(x) != 0; -# else - return std::isinf(x); -# endif -# elif GLM_COMPILER & GLM_COMPILER_CUDA - // http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/group__CUDA__MATH__DOUBLE_g13431dd2b40b51f9139cbb7f50c18fab.html#g13431dd2b40b51f9139cbb7f50c18fab - return ::isinf(double(x)) != 0; -# else - return std::isinf(x); -# endif - } -# endif - - template - GLM_FUNC_QUALIFIER vec isinf(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isinf' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = glm::isinf(v[l]); - return Result; - } - - GLM_FUNC_QUALIFIER int floatBitsToInt(float const& v) - { - union - { - float in; - int out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec floatBitsToInt(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER uint floatBitsToUint(float const& v) - { - union - { - float in; - uint out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec floatBitsToUint(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER float intBitsToFloat(int const& v) - { - union - { - int in; - float out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec intBitsToFloat(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER float uintBitsToFloat(uint const& v) - { - union - { - uint in; - float out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec uintBitsToFloat(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - -# if GLM_HAS_CXX11_STL - using std::fma; -# else - template - GLM_FUNC_QUALIFIER genType fma(genType const& a, genType const& b, genType const& c) - { - return a * b + c; - } -# endif - - template - GLM_FUNC_QUALIFIER genType frexp(genType x, int& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'frexp' only accept floating-point inputs"); - - return std::frexp(x, &exp); - } - - template - GLM_FUNC_QUALIFIER vec frexp(vec const& v, vec& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'frexp' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = std::frexp(v[l], &exp[l]); - return Result; - } - - template - GLM_FUNC_QUALIFIER genType ldexp(genType const& x, int const& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ldexp' only accept floating-point inputs"); - - return std::ldexp(x, exp); - } - - template - GLM_FUNC_QUALIFIER vec ldexp(vec const& v, vec const& exp) - { - 
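The `floatBitsTo*` family above type-puns through a union (and through `reinterpret_cast` in the vector overloads), which is valid C but formally undefined behaviour in ISO C++ — the compilers GLM targets all support it. The strictly conforming alternatives are `std::memcpy` or, since C++20, `std::bit_cast`; both compile down to a single move:

    #include <bit>     // std::bit_cast, C++20
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    std::int32_t float_bits(float v) // portable equivalent of the union trick
    {
        std::int32_t out;
        std::memcpy(&out, &v, sizeof out);
        return out;
    }

    int main()
    {
        std::cout << std::hex << float_bits(1.0f) << '\n';                  // 3f800000
        std::cout << std::hex << std::bit_cast<std::int32_t>(1.0f) << '\n'; // 3f800000
    }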
GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ldexp' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = std::ldexp(v[l], exp[l]); - return Result; - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_common_simd.inl" -#endif diff --git a/third_party/glm/detail/func_common_simd.inl b/third_party/glm/detail/func_common_simd.inl deleted file mode 100755 index ce0032d..0000000 --- a/third_party/glm/detail/func_common_simd.inl +++ /dev/null @@ -1,231 +0,0 @@ -/// @ref core -/// @file glm/detail/func_common_simd.inl - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#include "../simd/common.h" - -#include - -namespace glm{ -namespace detail -{ - template - struct compute_abs_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_abs(v.data); - return result; - } - }; - - template - struct compute_abs_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) - { - vec<4, int, Q> result; - result.data = glm_ivec4_abs(v.data); - return result; - } - }; - - template - struct compute_floor<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_floor(v.data); - return result; - } - }; - - template - struct compute_ceil<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_ceil(v.data); - return result; - } - }; - - template - struct compute_fract<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_fract(v.data); - return result; - } - }; - - template - struct compute_round<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_round(v.data); - return result; - } - }; - - template - struct compute_mod<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { - vec<4, float, Q> result; - result.data = glm_vec4_mod(x.data, y.data); - return result; - } - }; - - template - struct compute_min_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - vec<4, float, Q> result; - result.data = _mm_min_ps(v1.data, v2.data); - return result; - } - }; - - template - struct compute_min_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - vec<4, int, Q> result; - result.data = _mm_min_epi32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_min_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - vec<4, uint, Q> result; - result.data = _mm_min_epu32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - vec<4, float, Q> result; - result.data = _mm_max_ps(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static 
vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - vec<4, int, Q> result; - result.data = _mm_max_epi32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - vec<4, uint, Q> result; - result.data = _mm_max_epu32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& minVal, vec<4, float, Q> const& maxVal) - { - vec<4, float, Q> result; - result.data = _mm_min_ps(_mm_max_ps(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& x, vec<4, int, Q> const& minVal, vec<4, int, Q> const& maxVal) - { - vec<4, int, Q> result; - result.data = _mm_min_epi32(_mm_max_epi32(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& x, vec<4, uint, Q> const& minVal, vec<4, uint, Q> const& maxVal) - { - vec<4, uint, Q> result; - result.data = _mm_min_epu32(_mm_max_epu32(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_mix_vector<4, float, bool, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y, vec<4, bool, Q> const& a) - { - __m128i const Load = _mm_set_epi32(-static_cast(a.w), -static_cast(a.z), -static_cast(a.y), -static_cast(a.x)); - __m128 const Mask = _mm_castsi128_ps(Load); - - vec<4, float, Q> Result; -# if 0 && GLM_ARCH & GLM_ARCH_AVX - Result.data = _mm_blendv_ps(x.data, y.data, Mask); -# else - Result.data = _mm_or_ps(_mm_and_ps(Mask, y.data), _mm_andnot_ps(Mask, x.data)); -# endif - return Result; - } - }; -/* FIXME - template - struct compute_step_vector - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge, vec<4, float, Q> const& x) - { - vec<4, float, Q> Result; - result.data = glm_vec4_step(edge.data, x.data); - return result; - } - }; -*/ - template - struct compute_smoothstep_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge0, vec<4, float, Q> const& edge1, vec<4, float, Q> const& x) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_smoothstep(edge0.data, edge1.data, x.data); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_exponential.inl b/third_party/glm/detail/func_exponential.inl deleted file mode 100755 index 2040d41..0000000 --- a/third_party/glm/detail/func_exponential.inl +++ /dev/null @@ -1,152 +0,0 @@ -/// @ref core -/// @file glm/detail/func_exponential.inl - -#include "../vector_relational.hpp" -#include "_vectorize.hpp" -#include -#include -#include - -namespace glm{ -namespace detail -{ -# if GLM_HAS_CXX11_STL - using std::log2; -# else - template - genType log2(genType Value) - { - return std::log(Value) * static_cast(1.4426950408889634073599246810019); - } -# endif - - template - struct compute_log2 - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'log2' only accept floating-point inputs. 
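The SSE2 `mix` above is the pre-SSE4.1 idiom for a per-lane select: each bool is negated into an all-zeros or all-ones 32-bit lane (-0 gives 0x00000000, -1 gives 0xFFFFFFFF), and `(mask & y) | (~mask & x)` then picks `y` exactly where the mask is set — the same operation `_mm_blendv_ps` performs in one instruction on SSE4.1+ (the `#if 0` above deliberately keeps the portable path). A compilable sketch, assuming an SSE2-capable x86 target:

    #include <emmintrin.h>
    #include <cstdio>

    int main()
    {
        __m128 x = _mm_set_ps(4.f, 3.f, 2.f, 1.f);     // _mm_set_ps lists lanes high to low
        __m128 y = _mm_set_ps(40.f, 30.f, 20.f, 10.f);

        __m128i load = _mm_set_epi32(0, -1, 0, -1);    // select y in lanes 0 and 2
        __m128  mask = _mm_castsi128_ps(load);

        __m128 r = _mm_or_ps(_mm_and_ps(mask, y), _mm_andnot_ps(mask, x));

        float out[4];
        _mm_storeu_ps(out, r);
        std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 10 2 30 4
    }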
Include for integer inputs."); - - return detail::functor1::call(log2, v); - } - }; - - template - struct compute_sqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::sqrt, x); - } - }; - - template - struct compute_inversesqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return static_cast(1) / sqrt(x); - } - }; - - template - struct compute_inversesqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - vec tmp(x); - vec xhalf(tmp * 0.5f); - vec* p = reinterpret_cast*>(const_cast*>(&x)); - vec i = vec(0x5f375a86) - (*p >> vec(1)); - vec* ptmp = reinterpret_cast*>(&i); - tmp = *ptmp; - tmp = tmp * (1.5f - xhalf * tmp * tmp); - return tmp; - } - }; -}//namespace detail - - // pow - using std::pow; - template - GLM_FUNC_QUALIFIER vec pow(vec const& base, vec const& exponent) - { - return detail::functor2::call(pow, base, exponent); - } - - // exp - using std::exp; - template - GLM_FUNC_QUALIFIER vec exp(vec const& x) - { - return detail::functor1::call(exp, x); - } - - // log - using std::log; - template - GLM_FUNC_QUALIFIER vec log(vec const& x) - { - return detail::functor1::call(log, x); - } - -# if GLM_HAS_CXX11_STL - using std::exp2; -# else - //exp2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType exp2(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'exp2' only accept floating-point inputs"); - - return std::exp(static_cast(0.69314718055994530941723212145818) * x); - } -# endif - - template - GLM_FUNC_QUALIFIER vec exp2(vec const& x) - { - return detail::functor1::call(exp2, x); - } - - // log2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType log2(genType x) - { - return log2(vec<1, genType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec log2(vec const& x) - { - return detail::compute_log2::is_iec559, detail::is_aligned::value>::call(x); - } - - // sqrt - using std::sqrt; - template - GLM_FUNC_QUALIFIER vec sqrt(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sqrt' only accept floating-point inputs"); - return detail::compute_sqrt::value>::call(x); - } - - // inversesqrt - template - GLM_FUNC_QUALIFIER genType inversesqrt(genType x) - { - return static_cast(1) / sqrt(x); - } - - template - GLM_FUNC_QUALIFIER vec inversesqrt(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'inversesqrt' only accept floating-point inputs"); - return detail::compute_inversesqrt::value>::call(x); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_exponential_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_exponential_simd.inl b/third_party/glm/detail/func_exponential_simd.inl deleted file mode 100755 index fb78951..0000000 --- a/third_party/glm/detail/func_exponential_simd.inl +++ /dev/null @@ -1,37 +0,0 @@ -/// @ref core -/// @file glm/detail/func_exponential_simd.inl - -#include "../simd/exponential.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_sqrt<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> Result; - Result.data = _mm_sqrt_ps(v.data); - return Result; - } - }; - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - struct compute_sqrt<4, float, aligned_lowp, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& v) - { - vec<4, float, aligned_lowp> Result; - 
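The `lowp` `inversesqrt` above is the classic fast reciprocal square root: a positive float's bit pattern, read as an integer, is approximately a scaled-and-shifted log2 of its value, so `0x5f375a86 - (i >> 1)` approximates the bits of x^(-1/2); one Newton-Raphson step y * (1.5 - 0.5 * x * y^2) then brings the estimate within a fraction of a percent. Scalar sketch, using `memcpy` in place of the deleted code's pointer casts:

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    float fast_inversesqrt(float x)
    {
        float const xhalf = 0.5f * x;
        std::uint32_t i;
        std::memcpy(&i, &x, sizeof i);     // bits of x
        i = 0x5f375a86u - (i >> 1);        // first approximation of 1/sqrt(x)
        float y;
        std::memcpy(&y, &i, sizeof y);
        return y * (1.5f - xhalf * y * y); // one Newton-Raphson refinement
    }

    int main()
    {
        std::cout << fast_inversesqrt(4.0f) << " vs " << 1.0f / std::sqrt(4.0f) << '\n';
        // ~0.499 vs 0.5 -- well under 1% error after a single iteration
    }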
Result.data = glm_vec4_sqrt_lowp(v.data); - return Result; - } - }; -# endif -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_geometric.inl b/third_party/glm/detail/func_geometric.inl deleted file mode 100755 index 9cde28f..0000000 --- a/third_party/glm/detail/func_geometric.inl +++ /dev/null @@ -1,243 +0,0 @@ -#include "../exponential.hpp" -#include "../common.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_length - { - GLM_FUNC_QUALIFIER static T call(vec const& v) - { - return sqrt(dot(v, v)); - } - }; - - template - struct compute_distance - { - GLM_FUNC_QUALIFIER static T call(vec const& p0, vec const& p1) - { - return length(p1 - p0); - } - }; - - template - struct compute_dot{}; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<1, T, Q> const& a, vec<1, T, Q> const& b) - { - return a.x * b.x; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<2, T, Q> const& a, vec<2, T, Q> const& b) - { - vec<2, T, Q> tmp(a * b); - return tmp.x + tmp.y; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - vec<3, T, Q> tmp(a * b); - return tmp.x + tmp.y + tmp.z; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> tmp(a * b); - return (tmp.x + tmp.y) + (tmp.z + tmp.w); - } - }; - - template - struct compute_cross - { - GLM_FUNC_QUALIFIER static vec<3, T, Q> call(vec<3, T, Q> const& x, vec<3, T, Q> const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); - - return vec<3, T, Q>( - x.y * y.z - y.y * x.z, - x.z * y.x - y.z * x.x, - x.x * y.y - y.x * x.y); - } - }; - - template - struct compute_normalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return v * inversesqrt(dot(v, v)); - } - }; - - template - struct compute_faceforward - { - GLM_FUNC_QUALIFIER static vec call(vec const& N, vec const& I, vec const& Nref) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return dot(Nref, I) < static_cast(0) ? N : -N; - } - }; - - template - struct compute_reflect - { - GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N) - { - return I - N * dot(N, I) * static_cast(2); - } - }; - - template - struct compute_refract - { - GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N, T eta) - { - T const dotValue(dot(N, I)); - T const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); - vec const Result = - (k >= static_cast(0)) ? 
(eta * I - (eta * dotValue + std::sqrt(k)) * N) : vec(0); - return Result; - } - }; -}//namespace detail - - // length - template - GLM_FUNC_QUALIFIER genType length(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); - - return abs(x); - } - - template - GLM_FUNC_QUALIFIER T length(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); - - return detail::compute_length::value>::call(v); - } - - // distance - template - GLM_FUNC_QUALIFIER genType distance(genType const& p0, genType const& p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance' accepts only floating-point inputs"); - - return length(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T distance(vec const& p0, vec const& p1) - { - return detail::compute_distance::value>::call(p0, p1); - } - - // dot - template - GLM_FUNC_QUALIFIER T dot(T x, T y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); - return x * y; - } - - template - GLM_FUNC_QUALIFIER T dot(vec const& x, vec const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); - return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); - } - - // cross - template - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y) - { - return detail::compute_cross::value>::call(x, y); - } -/* - // normalize - template - GLM_FUNC_QUALIFIER genType normalize(genType const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return x < genType(0) ? genType(-1) : genType(1); - } -*/ - template - GLM_FUNC_QUALIFIER vec normalize(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return detail::compute_normalize::value>::call(x); - } - - // faceforward - template - GLM_FUNC_QUALIFIER genType faceforward(genType const& N, genType const& I, genType const& Nref) - { - return dot(Nref, I) < static_cast(0) ? 
N : -N; - } - - template - GLM_FUNC_QUALIFIER vec faceforward(vec const& N, vec const& I, vec const& Nref) - { - return detail::compute_faceforward::value>::call(N, I, Nref); - } - - // reflect - template - GLM_FUNC_QUALIFIER genType reflect(genType const& I, genType const& N) - { - return I - N * dot(N, I) * genType(2); - } - - template - GLM_FUNC_QUALIFIER vec reflect(vec const& I, vec const& N) - { - return detail::compute_reflect::value>::call(I, N); - } - - // refract - template - GLM_FUNC_QUALIFIER genType refract(genType const& I, genType const& N, genType eta) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); - genType const dotValue(dot(N, I)); - genType const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); - return (eta * I - (eta * dotValue + sqrt(k)) * N) * static_cast(k >= static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER vec refract(vec const& I, vec const& N, T eta) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); - return detail::compute_refract::value>::call(I, N, eta); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_geometric_simd.inl" -#endif diff --git a/third_party/glm/detail/func_geometric_simd.inl b/third_party/glm/detail/func_geometric_simd.inl deleted file mode 100755 index dfe3f4c..0000000 --- a/third_party/glm/detail/func_geometric_simd.inl +++ /dev/null @@ -1,165 +0,0 @@ -/// @ref core -/// @file glm/detail/func_geometric_simd.inl - -#include "../simd/geometric.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_length<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) - { - return _mm_cvtss_f32(glm_vec4_length(v.data)); - } - }; - - template - struct compute_distance<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) - { - return _mm_cvtss_f32(glm_vec4_distance(p0.data, p1.data)); - } - }; - - template - struct compute_dot, float, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { - return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); - } - }; - - template - struct compute_cross - { - GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b) - { - __m128 const set0 = _mm_set_ps(0.0f, a.z, a.y, a.x); - __m128 const set1 = _mm_set_ps(0.0f, b.z, b.y, b.x); - __m128 const xpd0 = glm_vec4_cross(set0, set1); - - vec<4, float, Q> Result; - Result.data = xpd0; - return vec<3, float, Q>(Result); - } - }; - - template - struct compute_normalize<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_normalize(v.data); - return Result; - } - }; - - template - struct compute_faceforward<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& N, vec<4, float, Q> const& I, vec<4, float, Q> const& Nref) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_faceforward(N.data, I.data, Nref.data); - return Result; - } - }; - - template - struct compute_reflect<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_reflect(I.data, N.data); - return Result; - } - }; - - template - struct 
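On `refract` above: with unit incident direction I, unit normal N and index ratio eta, k = 1 - eta^2 * (1 - (N.I)^2) is the squared cosine of the transmitted angle, and k < 0 means total internal reflection, for which GLSL specifies a zero result. The vector overload selects zero explicitly; the scalar overload instead multiplies by `(k >= 0)` cast to 0 or 1, which under IEEE arithmetic multiplies 0 by the NaN coming from `sqrt(k)` and therefore appears to propagate NaN rather than return 0 — a subtle divergence between the two paths worth knowing about. A branching sketch over a hypothetical 2-component vector:

    #include <cmath>
    #include <iostream>

    struct V2 { float x, y; };
    float dot(V2 a, V2 b) { return a.x * b.x + a.y * b.y; }

    // Snell refraction as in the deleted compute_refract (I, N unit length).
    V2 refract(V2 I, V2 N, float eta)
    {
        float const d = dot(N, I);
        float const k = 1.0f - eta * eta * (1.0f - d * d);
        if (k < 0.0f)
            return V2{0.0f, 0.0f}; // total internal reflection
        float const s = eta * d + std::sqrt(k);
        return V2{eta * I.x - s * N.x, eta * I.y - s * N.y};
    }

    int main()
    {
        V2 const I{0.7071f, -0.7071f};           // 45 degrees onto the surface
        V2 const N{0.0f, 1.0f};
        V2 const r = refract(I, N, 1.0f / 1.5f); // air into glass
        std::cout << r.x << ' ' << r.y << '\n';  // ~0.471 -0.882: bent toward the normal
    }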
compute_refract<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N, float eta) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_refract(I.data, N.data, _mm_set1_ps(eta)); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#elif GLM_ARCH & GLM_ARCH_NEON_BIT -namespace glm{ -namespace detail -{ - template - struct compute_length<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) - { - return compute_dot, float, true>::call(v, v); - } - }; - - template - struct compute_distance<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) - { - return compute_length<4, float, Q, true>::call(p1 - p0); - } - }; - - - template - struct compute_dot, float, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - float32x4_t v = vmulq_f32(x.data, y.data); - v = vpaddq_f32(v, v); - v = vpaddq_f32(v, v); - return vgetq_lane_f32(v, 0); -#else // Armv7a with Neon - float32x4_t p = vmulq_f32(x.data, y.data); - float32x2_t v = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); - v = vpadd_f32(v, v); - return vget_lane_f32(v, 0); -#endif - } - }; - - template - struct compute_normalize<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - float32x4_t p = vmulq_f32(v.data, v.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - p = vpaddq_f32(p, p); - p = vpaddq_f32(p, p); -#else - float32x2_t t = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); - t = vpadd_f32(t, t); - p = vcombine_f32(t, t); -#endif - - float32x4_t vd = vrsqrteq_f32(p); - vec<4, float, Q> Result; - Result.data = vmulq_f32(v.data, vd); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_integer.inl b/third_party/glm/detail/func_integer.inl deleted file mode 100755 index 091e1e0..0000000 --- a/third_party/glm/detail/func_integer.inl +++ /dev/null @@ -1,372 +0,0 @@ -/// @ref core - -#include "_vectorize.hpp" -#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) -# include -# pragma intrinsic(_BitScanReverse) -#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) -#include - -#if !GLM_HAS_EXTENDED_INTEGER_TYPE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic ignored "-Wlong-long" -# endif -# if (GLM_COMPILER & GLM_COMPILER_CLANG) -# pragma clang diagnostic ignored "-Wc++11-long-long" -# endif -#endif - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER T mask(T Bits) - { - return Bits >= static_cast(sizeof(T) * 8) ? 
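The NEON `dot` above reduces the lane-wise products with pairwise adds: one `vpaddq_f32(v, v)` turns (a, b, c, d) into (a+b, c+d, a+b, c+d), and a second application leaves the full sum in every lane; the ARMv7 path does the same on 64-bit halves with `vpadd_f32`. A compilable sketch, assuming an AArch64 target (where the quad-word `vpaddq_f32` exists):

    #include <arm_neon.h>
    #include <cstdio>

    float dot4(float32x4_t a, float32x4_t b)
    {
        float32x4_t v = vmulq_f32(a, b); // lane-wise products
        v = vpaddq_f32(v, v);            // (x+y, z+w, x+y, z+w)
        v = vpaddq_f32(v, v);            // full sum in every lane
        return vgetq_lane_f32(v, 0);
    }

    int main()
    {
        float const af[4] = {1.f, 2.f, 3.f, 4.f};
        float const bf[4] = {5.f, 6.f, 7.f, 8.f};
        std::printf("%g\n", dot4(vld1q_f32(af), vld1q_f32(bf))); // 70
    }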
~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); - } - - template - struct compute_bitfieldReverseStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) - { - return v; - } - }; - - template - struct compute_bitfieldReverseStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) - { - return (v & Mask) << Shift | (v & (~Mask)) >> Shift; - } - }; - - template - struct compute_bitfieldBitCountStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) - { - return v; - } - }; - - template - struct compute_bitfieldBitCountStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) - { - return (v & Mask) + ((v >> Shift) & Mask); - } - }; - - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - if(Value == 0) - return -1; - - return glm::bitCount(~Value & (Value - static_cast(1))); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - }; - -# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - }; -# endif -# endif//GLM_HAS_BITSCAN_WINDOWS - - template - struct compute_findMSB_step_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, T Shift) - { - return x | (x >> Shift); - } - }; - - template - struct compute_findMSB_step_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, T) - { - return x; - } - }; - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - vec x(v); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 1)); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 2)); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 4)); - x = compute_findMSB_step_vec= 16>::call(x, static_cast( 8)); - x = compute_findMSB_step_vec= 32>::call(x, static_cast(16)); - x = compute_findMSB_step_vec= 64>::call(x, static_cast(32)); - return vec(sizeof(T) * 8 - 1) - glm::bitCount(~x); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(compute_findMSB_32, x); - } - }; - -# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) - template - GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? 
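The portable `compute_findLSB` above rests on a tidy identity: for nonzero v, `~v & (v - 1)` has ones in exactly the positions below the lowest set bit of v, so popcounting it yields that bit's index (the Windows paths use `_BitScanForward` instead). Worked through for v = 40 = 0b101000: v - 1 = 0b100111, masking with ~v leaves 0b000111, and the popcount 3 is indeed the index of the lowest set bit. Self-checking sketch:

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    int find_lsb(std::uint32_t v)
    {
        if (v == 0)
            return -1; // GLSL defines findLSB(0) as -1
        return static_cast<int>(std::bitset<32>(~v & (v - 1)).count());
    }

    int main()
    {
        assert(find_lsb(40) == 3); // 40 = 0b101000
        assert(find_lsb(1) == 0);
        assert(find_lsb(0) == -1);
    }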
int(Result) : -1; - } - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(compute_findMSB_64, x); - } - }; -# endif -# endif//GLM_HAS_BITSCAN_WINDOWS -}//namespace detail - - // uaddCarry - GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry) - { - detail::uint64 const Value64(static_cast(x) + static_cast(y)); - detail::uint64 const Max32((static_cast(1) << static_cast(32)) - static_cast(1)); - Carry = Value64 > Max32 ? 1u : 0u; - return static_cast(Value64 % (Max32 + static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER vec uaddCarry(vec const& x, vec const& y, vec& Carry) - { - vec Value64(vec(x) + vec(y)); - vec Max32((static_cast(1) << static_cast(32)) - static_cast(1)); - Carry = mix(vec(0), vec(1), greaterThan(Value64, Max32)); - return vec(Value64 % (Max32 + static_cast(1))); - } - - // usubBorrow - GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow) - { - Borrow = x >= y ? static_cast(0) : static_cast(1); - if(y >= x) - return y - x; - else - return static_cast((static_cast(1) << static_cast(32)) + (static_cast(y) - static_cast(x))); - } - - template - GLM_FUNC_QUALIFIER vec usubBorrow(vec const& x, vec const& y, vec& Borrow) - { - Borrow = mix(vec(1), vec(0), greaterThanEqual(x, y)); - vec const YgeX(y - x); - vec const XgeY(vec((static_cast(1) << static_cast(32)) + (vec(y) - vec(x)))); - return mix(XgeY, YgeX, greaterThanEqual(y, x)); - } - - // umulExtended - GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb) - { - detail::uint64 Value64 = static_cast(x) * static_cast(y); - msb = static_cast(Value64 >> static_cast(32)); - lsb = static_cast(Value64); - } - - template - GLM_FUNC_QUALIFIER void umulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) - { - vec Value64(vec(x) * vec(y)); - msb = vec(Value64 >> static_cast(32)); - lsb = vec(Value64); - } - - // imulExtended - GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb) - { - detail::int64 Value64 = static_cast(x) * static_cast(y); - msb = static_cast(Value64 >> static_cast(32)); - lsb = static_cast(Value64); - } - - template - GLM_FUNC_QUALIFIER void imulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) - { - vec Value64(vec(x) * vec(y)); - lsb = vec(Value64 & static_cast(0xFFFFFFFF)); - msb = vec((Value64 >> static_cast(32)) & static_cast(0xFFFFFFFF)); - } - - // bitfieldExtract - template - GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits) - { - return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldExtract(vec const& Value, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldExtract' only accept integer inputs"); - - return (Value >> static_cast(Offset)) & static_cast(detail::mask(Bits)); - } - - // bitfieldInsert - template - GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); - - return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldInsert(vec const& Base, vec const& Insert, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); - - T const Mask = 
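`uaddCarry` above emulates GLSL's add-with-carry by widening both operands to 64 bits: the carry is 1 exactly when the exact sum exceeds 2^32 - 1, and the low 32 bits (the sum modulo 2^32) are returned. Scalar sketch:

    #include <cstdint>
    #include <cstdio>

    std::uint32_t uadd_carry(std::uint32_t x, std::uint32_t y, std::uint32_t& carry)
    {
        std::uint64_t const sum = std::uint64_t(x) + std::uint64_t(y);
        carry = sum > 0xFFFFFFFFull ? 1u : 0u;
        return static_cast<std::uint32_t>(sum); // low 32 bits == sum mod 2^32
    }

    int main()
    {
        std::uint32_t c;
        std::uint32_t const r = uadd_carry(0xFFFFFFFFu, 2u, c);
        std::printf("%u %u\n", r, c); // 1 1
    }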
static_cast(detail::mask(Bits) << Offset); - return (Base & ~Mask) | ((Insert << static_cast(Offset)) & Mask); - } - - // bitfieldReverse - template - GLM_FUNC_QUALIFIER genIUType bitfieldReverse(genIUType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); - - return bitfieldReverse(glm::vec<1, genIUType, glm::defaultp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldReverse(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); - - vec x(v); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 2>::call(x, static_cast(0x5555555555555555ull), static_cast( 1)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 4>::call(x, static_cast(0x3333333333333333ull), static_cast( 2)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 8>::call(x, static_cast(0x0F0F0F0F0F0F0F0Full), static_cast( 4)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 16>::call(x, static_cast(0x00FF00FF00FF00FFull), static_cast( 8)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 32>::call(x, static_cast(0x0000FFFF0000FFFFull), static_cast(16)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 64>::call(x, static_cast(0x00000000FFFFFFFFull), static_cast(32)); - return x; - } - - // bitCount - template - GLM_FUNC_QUALIFIER int bitCount(genIUType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); - - return bitCount(glm::vec<1, genIUType, glm::defaultp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec bitCount(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable : 4310) //cast truncates constant value -# endif - - vec::type, Q> x(*reinterpret_cast::type, Q> const *>(&v)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 2>::call(x, typename detail::make_unsigned::type(0x5555555555555555ull), typename detail::make_unsigned::type( 1)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 4>::call(x, typename detail::make_unsigned::type(0x3333333333333333ull), typename detail::make_unsigned::type( 2)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 8>::call(x, typename detail::make_unsigned::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned::type( 4)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 16>::call(x, typename detail::make_unsigned::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned::type( 8)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 32>::call(x, typename detail::make_unsigned::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned::type(16)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 64>::call(x, typename detail::make_unsigned::type(0x00000000FFFFFFFFull), typename detail::make_unsigned::type(32)); - return vec(x); - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif - } - - // findLSB - template - GLM_FUNC_QUALIFIER int findLSB(genIUType Value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); - - return 
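The mask cascade above (0x5555..., 0x3333..., 0x0F0F..., and so on) is the classic SWAR population count: each `(v & Mask) + ((v >> Shift) & Mask)` step sums adjacent bit fields of doubling width, so a 32-bit count finishes in five steps with no loop or lookup table. A 32-bit worked version:

    #include <cassert>
    #include <cstdint>

    int popcount32(std::uint32_t v)
    {
        v = (v & 0x55555555u) + ((v >> 1)  & 0x55555555u); // 16 two-bit sums
        v = (v & 0x33333333u) + ((v >> 2)  & 0x33333333u); // 8 four-bit sums
        v = (v & 0x0F0F0F0Fu) + ((v >> 4)  & 0x0F0F0F0Fu); // 4 byte sums
        v = (v & 0x00FF00FFu) + ((v >> 8)  & 0x00FF00FFu); // 2 halfword sums
        v = (v & 0x0000FFFFu) + ((v >> 16) & 0x0000FFFFu); // final total
        return static_cast<int>(v);
    }

    int main()
    {
        assert(popcount32(0xF0F0F0F0u) == 16);
        assert(popcount32(0x80000001u) == 2);
        assert(popcount32(0u) == 0);
    }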
detail::compute_findLSB::call(Value); - } - - template - GLM_FUNC_QUALIFIER vec findLSB(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); - - return detail::functor1::call(findLSB, x); - } - - // findMSB - template - GLM_FUNC_QUALIFIER int findMSB(genIUType v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); - - return findMSB(vec<1, genIUType>(v)).x; - } - - template - GLM_FUNC_QUALIFIER vec findMSB(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); - - return detail::compute_findMSB_vec::call(v); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_integer_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_integer_simd.inl b/third_party/glm/detail/func_integer_simd.inl deleted file mode 100755 index 8be6c9c..0000000 --- a/third_party/glm/detail/func_integer_simd.inl +++ /dev/null @@ -1,65 +0,0 @@ -#include "../simd/integer.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_bitfieldReverseStep<4, uint, Q, true, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) - { - __m128i const set0 = v.data; - - __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); - __m128i const and1 = _mm_and_si128(set0, set1); - __m128i const sft1 = _mm_slli_epi32(and1, Shift); - - __m128i const set2 = _mm_andnot_si128(set0, _mm_set1_epi32(-1)); - __m128i const and2 = _mm_and_si128(set0, set2); - __m128i const sft2 = _mm_srai_epi32(and2, Shift); - - __m128i const or0 = _mm_or_si128(sft1, sft2); - - return or0; - } - }; - - template - struct compute_bitfieldBitCountStep<4, uint, Q, true, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) - { - __m128i const set0 = v.data; - - __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); - __m128i const and0 = _mm_and_si128(set0, set1); - __m128i const sft0 = _mm_slli_epi32(set0, Shift); - __m128i const and1 = _mm_and_si128(sft0, set1); - __m128i const add0 = _mm_add_epi32(and0, and1); - - return add0; - } - }; -}//namespace detail - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template<> - GLM_FUNC_QUALIFIER int bitCount(uint x) - { - return _mm_popcnt_u32(x); - } - -# if(GLM_MODEL == GLM_MODEL_64) - template<> - GLM_FUNC_QUALIFIER int bitCount(detail::uint64 x) - { - return static_cast(_mm_popcnt_u64(x)); - } -# endif//GLM_MODEL -# endif//GLM_ARCH - -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_matrix.inl b/third_party/glm/detail/func_matrix.inl deleted file mode 100755 index d980c6d..0000000 --- a/third_party/glm/detail/func_matrix.inl +++ /dev/null @@ -1,398 +0,0 @@ -#include "../geometric.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_matrixCompMult - { - GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) - { - mat Result; - for(length_t i = 0; i < Result.length(); ++i) - Result[i] = x[i] * y[i]; - return Result; - } - }; - - template - struct compute_transpose{}; - - template - struct compute_transpose<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) - { - mat<2, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - return Result; - } - }; - - template - struct compute_transpose<2, 3, T, Q, 
Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 2, T, Q> call(mat<2, 3, T, Q> const& m) - { - mat<3,2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - return Result; - } - }; - - template - struct compute_transpose<2, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 2, T, Q> call(mat<2, 4, T, Q> const& m) - { - mat<4, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - return Result; - } - }; - - template - struct compute_transpose<3, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 3, T, Q> call(mat<3, 2, T, Q> const& m) - { - mat<2, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - return Result; - } - }; - - template - struct compute_transpose<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) - { - mat<3, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - return Result; - } - }; - - template - struct compute_transpose<3, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 3, T, Q> call(mat<3, 4, T, Q> const& m) - { - mat<4, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - return Result; - } - }; - - template - struct compute_transpose<4, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 4, T, Q> call(mat<4, 2, T, Q> const& m) - { - mat<2, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - return Result; - } - }; - - template - struct compute_transpose<4, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 4, T, Q> call(mat<4, 3, T, Q> const& m) - { - mat<3, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - return Result; - } - }; - - template - struct compute_transpose<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) - { - mat<4, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - Result[3][3] = m[3][3]; - return Result; - } - }; - - template - struct 
compute_determinant{}; - - template - struct compute_determinant<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<2, 2, T, Q> const& m) - { - return m[0][0] * m[1][1] - m[1][0] * m[0][1]; - } - }; - - template - struct compute_determinant<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<3, 3, T, Q> const& m) - { - return - + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) - - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) - + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]); - } - }; - - template - struct compute_determinant<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<4, 4, T, Q> const& m) - { - T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - vec<4, T, Q> DetCof( - + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - return - m[0][0] * DetCof[0] + m[0][1] * DetCof[1] + - m[0][2] * DetCof[2] + m[0][3] * DetCof[3]; - } - }; - - template - struct compute_inverse{}; - - template - struct compute_inverse<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) - { - T OneOverDeterminant = static_cast(1) / ( - + m[0][0] * m[1][1] - - m[1][0] * m[0][1]); - - mat<2, 2, T, Q> Inverse( - + m[1][1] * OneOverDeterminant, - - m[0][1] * OneOverDeterminant, - - m[1][0] * OneOverDeterminant, - + m[0][0] * OneOverDeterminant); - - return Inverse; - } - }; - - template - struct compute_inverse<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) - { - T OneOverDeterminant = static_cast(1) / ( - + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) - - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) - + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])); - - mat<3, 3, T, Q> Inverse; - Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]) * OneOverDeterminant; - Inverse[1][0] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]) * OneOverDeterminant; - Inverse[2][0] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]) * OneOverDeterminant; - Inverse[0][1] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]) * OneOverDeterminant; - Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]) * OneOverDeterminant; - Inverse[2][1] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]) * OneOverDeterminant; - Inverse[0][2] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]) * OneOverDeterminant; - Inverse[1][2] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]) * OneOverDeterminant; - Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]) * OneOverDeterminant; - - return Inverse; - } - }; - - template - struct compute_inverse<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) - { - T Coef00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - T Coef02 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - T Coef03 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - T Coef04 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - T Coef06 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - T Coef07 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - T Coef08 = m[2][1] * m[3][2] - 
m[3][1] * m[2][2]; - T Coef10 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - T Coef11 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - T Coef12 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T Coef14 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - T Coef15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - T Coef16 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T Coef18 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - T Coef19 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - T Coef20 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - T Coef22 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - T Coef23 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - vec<4, T, Q> Fac0(Coef00, Coef00, Coef02, Coef03); - vec<4, T, Q> Fac1(Coef04, Coef04, Coef06, Coef07); - vec<4, T, Q> Fac2(Coef08, Coef08, Coef10, Coef11); - vec<4, T, Q> Fac3(Coef12, Coef12, Coef14, Coef15); - vec<4, T, Q> Fac4(Coef16, Coef16, Coef18, Coef19); - vec<4, T, Q> Fac5(Coef20, Coef20, Coef22, Coef23); - - vec<4, T, Q> Vec0(m[1][0], m[0][0], m[0][0], m[0][0]); - vec<4, T, Q> Vec1(m[1][1], m[0][1], m[0][1], m[0][1]); - vec<4, T, Q> Vec2(m[1][2], m[0][2], m[0][2], m[0][2]); - vec<4, T, Q> Vec3(m[1][3], m[0][3], m[0][3], m[0][3]); - - vec<4, T, Q> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2); - vec<4, T, Q> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4); - vec<4, T, Q> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5); - vec<4, T, Q> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5); - - vec<4, T, Q> SignA(+1, -1, +1, -1); - vec<4, T, Q> SignB(-1, +1, -1, +1); - mat<4, 4, T, Q> Inverse(Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB); - - vec<4, T, Q> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]); - - vec<4, T, Q> Dot0(m[0] * Row0); - T Dot1 = (Dot0.x + Dot0.y) + (Dot0.z + Dot0.w); - - T OneOverDeterminant = static_cast(1) / Dot1; - - return Inverse * OneOverDeterminant; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER mat matrixCompMult(mat const& x, mat const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'matrixCompMult' only accept floating-point inputs"); - return detail::compute_matrixCompMult::value>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'outerProduct' only accept floating-point inputs"); - - typename detail::outerProduct_trait::type m; - for(length_t i = 0; i < m.length(); ++i) - m[i] = c * r[i]; - return m; - } - - template - GLM_FUNC_QUALIFIER typename mat::transpose_type transpose(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'transpose' only accept floating-point inputs"); - return detail::compute_transpose::value>::call(m); - } - - template - GLM_FUNC_QUALIFIER T determinant(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'determinant' only accept floating-point inputs"); - return detail::compute_determinant::value>::call(m); - } - - template - GLM_FUNC_QUALIFIER mat inverse(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'inverse' only accept floating-point inputs"); - return detail::compute_inverse::value>::call(m); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_matrix_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_matrix_simd.inl b/third_party/glm/detail/func_matrix_simd.inl deleted file mode 
100755 index f67ac66..0000000 --- a/third_party/glm/detail/func_matrix_simd.inl +++ /dev/null @@ -1,249 +0,0 @@ -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#include "type_mat4x4.hpp" -#include "../geometric.hpp" -#include "../simd/matrix.h" -#include - -namespace glm{ -namespace detail -{ -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template - struct compute_matrixCompMult<4, 4, float, Q, true> - { - GLM_STATIC_ASSERT(detail::is_aligned::value, "Specialization requires aligned"); - - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y) - { - mat<4, 4, float, Q> Result; - glm_mat4_matrixCompMult( - *static_cast(&x[0].data), - *static_cast(&y[0].data), - *static_cast(&Result[0].data)); - return Result; - } - }; -# endif - - template - struct compute_transpose<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - mat<4, 4, float, Q> Result; - glm_mat4_transpose(&m[0].data, &Result[0].data); - return Result; - } - }; - - template - struct compute_determinant<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(mat<4, 4, float, Q> const& m) - { - return _mm_cvtss_f32(glm_mat4_determinant(&m[0].data)); - } - }; - - template - struct compute_inverse<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - mat<4, 4, float, Q> Result; - glm_mat4_inverse(&m[0].data, &Result[0].data); - return Result; - } - }; -}//namespace detail - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_lowp> outerProduct<4, 4, float, aligned_lowp>(vec<4, float, aligned_lowp> const& c, vec<4, float, aligned_lowp> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_lowp> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } - - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_mediump> outerProduct<4, 4, float, aligned_mediump>(vec<4, float, aligned_mediump> const& c, vec<4, float, aligned_mediump> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_mediump> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } - - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_highp> outerProduct<4, 4, float, aligned_highp>(vec<4, float, aligned_highp> const& c, vec<4, float, aligned_highp> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_highp> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } -# endif -}//namespace glm - -#elif GLM_ARCH & GLM_ARCH_NEON_BIT - -namespace glm { -#if GLM_LANG & GLM_LANG_CXX11_FLAG - template - GLM_FUNC_QUALIFIER - typename std::enable_if::value, mat<4, 4, float, Q>>::type - operator*(mat<4, 4, float, Q> const & m1, mat<4, 4, float, Q> const & m2) - { - auto MulRow = [&](int l) { - float32x4_t const SrcA = m2[l].data; - - float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0); - r = neon::madd_lane(r, m1[1].data, SrcA, 1); - r = neon::madd_lane(r, m1[2].data, SrcA, 2); - r = neon::madd_lane(r, m1[3].data, SrcA, 3); - - return r; - }; - - mat<4, 4, float, aligned_highp> Result; - Result[0].data = MulRow(0); - Result[1].data = MulRow(1); - Result[2].data = MulRow(2); - Result[3].data = MulRow(3); - - return Result; - } -#endif // CXX11 - 
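The NEON operator* above builds each result column as a lane-weighted sum of m1's columns via mul_lane/madd_lane. A scalar sketch of the same column-major product (standalone; Mat4 and Multiply are illustrative names, not GLM API):

#include <array>

using Mat4 = std::array<std::array<float, 4>, 4>; // Mat4[col][row], column-major like GLM

Mat4 Multiply(const Mat4& m1, const Mat4& m2)
{
	Mat4 r{};
	for (int c = 0; c < 4; ++c)        // one result column = one MulRow() call above
		for (int k = 0; k < 4; ++k)    // lane k of m2[c] = one mul_lane/madd_lane step
			for (int row = 0; row < 4; ++row)
				r[c][row] += m1[k][row] * m2[c][k];
	return r;
}

Keeping a whole column in one 128-bit register is what lets each madd_lane step retire four multiply-adds at once.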
- template - struct detail::compute_inverse<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - float32x4_t const& m0 = m[0].data; - float32x4_t const& m1 = m[1].data; - float32x4_t const& m2 = m[2].data; - float32x4_t const& m3 = m[3].data; - - // m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - float32x4_t Fac0; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac0 = w0 * w1 - w2 * w3; - } - - // m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - float32x4_t Fac1; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac1 = w0 * w1 - w2 * w3; - } - - // m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - float32x4_t Fac2; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - Fac2 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - float32x4_t Fac3; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac3 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - float32x4_t Fac4; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - Fac4 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - float32x4_t Fac5; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - Fac5 = w0 * w1 - w2 * w3; - } - - float32x4_t Vec0 = 
neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]); - float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]); - float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]); - float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]); - - float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2; - float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4; - float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5; - float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5; - - float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0; - float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1; - float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2; - float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3; - - float32x4_t det = neon::mul_lane(r0, m0, 0); - det = neon::madd_lane(det, r1, m0, 1); - det = neon::madd_lane(det, r2, m0, 2); - det = neon::madd_lane(det, r3, m0, 3); - - float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0)); - - mat<4, 4, float, Q> r; - r[0].data = vmulq_f32(r0, rdet); - r[1].data = vmulq_f32(r1, rdet); - r[2].data = vmulq_f32(r2, rdet); - r[3].data = vmulq_f32(r3, rdet); - return r; - } - }; -}//namespace glm -#endif diff --git a/third_party/glm/detail/func_packing.inl b/third_party/glm/detail/func_packing.inl deleted file mode 100755 index 234b093..0000000 --- a/third_party/glm/detail/func_packing.inl +++ /dev/null @@ -1,189 +0,0 @@ -/// @ref core -/// @file glm/detail/func_packing.inl - -#include "../common.hpp" -#include "type_half.hpp" - -namespace glm -{ - GLM_FUNC_QUALIFIER uint packUnorm2x16(vec2 const& v) - { - union - { - unsigned short in[2]; - uint out; - } u; - - vec<2, unsigned short, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 65535.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x16(uint p) - { - union - { - uint in; - unsigned short out[2]; - } u; - - u.in = p; - - return vec2(u.out[0], u.out[1]) * 1.5259021896696421759365224689097e-5f; - } - - GLM_FUNC_QUALIFIER uint packSnorm2x16(vec2 const& v) - { - union - { - signed short in[2]; - uint out; - } u; - - vec<2, short, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 32767.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackSnorm2x16(uint p) - { - union - { - uint in; - signed short out[2]; - } u; - - u.in = p; - - return clamp(vec2(u.out[0], u.out[1]) * 3.0518509475997192297128208258309e-5f, -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint packUnorm4x8(vec4 const& v) - { - union - { - unsigned char in[4]; - uint out; - } u; - - vec<4, unsigned char, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - u.in[2] = result[2]; - u.in[3] = result[3]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm4x8(uint p) - { - union - { - uint in; - unsigned char out[4]; - } u; - - u.in = p; - - return vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0039215686274509803921568627451f; - } - - GLM_FUNC_QUALIFIER uint packSnorm4x8(vec4 const& v) - { - union - { - signed char in[4]; - uint out; - } u; - - vec<4, signed char, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 127.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - u.in[2] = result[2]; - u.in[3] = result[3]; - - return u.out; - } - - GLM_FUNC_QUALIFIER glm::vec4 
unpackSnorm4x8(uint p) - { - union - { - uint in; - signed char out[4]; - } u; - - u.in = p; - - return clamp(vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0078740157480315f, -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER double packDouble2x32(uvec2 const& v) - { - union - { - uint in[2]; - double out; - } u; - - u.in[0] = v[0]; - u.in[1] = v[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER uvec2 unpackDouble2x32(double v) - { - union - { - double in; - uint out[2]; - } u; - - u.in = v; - - return uvec2(u.out[0], u.out[1]); - } - - GLM_FUNC_QUALIFIER uint packHalf2x16(vec2 const& v) - { - union - { - signed short in[2]; - uint out; - } u; - - u.in[0] = detail::toFloat16(v.x); - u.in[1] = detail::toFloat16(v.y); - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackHalf2x16(uint v) - { - union - { - uint in; - signed short out[2]; - } u; - - u.in = v; - - return vec2( - detail::toFloat32(u.out[0]), - detail::toFloat32(u.out[1])); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_packing_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_packing_simd.inl b/third_party/glm/detail/func_packing_simd.inl deleted file mode 100755 index fd0fe8b..0000000 --- a/third_party/glm/detail/func_packing_simd.inl +++ /dev/null @@ -1,6 +0,0 @@ -namespace glm{ -namespace detail -{ - -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/func_trigonometric.inl b/third_party/glm/detail/func_trigonometric.inl deleted file mode 100755 index e129dce..0000000 --- a/third_party/glm/detail/func_trigonometric.inl +++ /dev/null @@ -1,197 +0,0 @@ -#include "_vectorize.hpp" -#include -#include - -namespace glm -{ - // radians - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType radians(genType degrees) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'radians' only accept floating-point input"); - - return degrees * static_cast(0.01745329251994329576923690768489); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec radians(vec const& v) - { - return detail::functor1::call(radians, v); - } - - // degrees - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType degrees(genType radians) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'degrees' only accept floating-point input"); - - return radians * static_cast(57.295779513082320876798154814105); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec degrees(vec const& v) - { - return detail::functor1::call(degrees, v); - } - - // sin - using ::std::sin; - - template - GLM_FUNC_QUALIFIER vec sin(vec const& v) - { - return detail::functor1::call(sin, v); - } - - // cos - using std::cos; - - template - GLM_FUNC_QUALIFIER vec cos(vec const& v) - { - return detail::functor1::call(cos, v); - } - - // tan - using std::tan; - - template - GLM_FUNC_QUALIFIER vec tan(vec const& v) - { - return detail::functor1::call(tan, v); - } - - // asin - using std::asin; - - template - GLM_FUNC_QUALIFIER vec asin(vec const& v) - { - return detail::functor1::call(asin, v); - } - - // acos - using std::acos; - - template - GLM_FUNC_QUALIFIER vec acos(vec const& v) - { - return detail::functor1::call(acos, v); - } - - // atan - template - GLM_FUNC_QUALIFIER genType atan(genType y, genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'atan' only accept floating-point input"); - - return ::std::atan2(y, x); - } - - template - GLM_FUNC_QUALIFIER vec atan(vec const& a, vec const& b) - { - return detail::functor2::call(::std::atan2, a, b); - } - - using std::atan; - - template - GLM_FUNC_QUALIFIER vec atan(vec 
const& v) - { - return detail::functor1::call(atan, v); - } - - // sinh - using std::sinh; - - template - GLM_FUNC_QUALIFIER vec sinh(vec const& v) - { - return detail::functor1::call(sinh, v); - } - - // cosh - using std::cosh; - - template - GLM_FUNC_QUALIFIER vec cosh(vec const& v) - { - return detail::functor1::call(cosh, v); - } - - // tanh - using std::tanh; - - template - GLM_FUNC_QUALIFIER vec tanh(vec const& v) - { - return detail::functor1::call(tanh, v); - } - - // asinh -# if GLM_HAS_CXX11_STL - using std::asinh; -# else - template - GLM_FUNC_QUALIFIER genType asinh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asinh' only accept floating-point input"); - - return (x < static_cast(0) ? static_cast(-1) : (x > static_cast(0) ? static_cast(1) : static_cast(0))) * log(std::abs(x) + sqrt(static_cast(1) + x * x)); - } -# endif - - template - GLM_FUNC_QUALIFIER vec asinh(vec const& v) - { - return detail::functor1::call(asinh, v); - } - - // acosh -# if GLM_HAS_CXX11_STL - using std::acosh; -# else - template - GLM_FUNC_QUALIFIER genType acosh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acosh' only accept floating-point input"); - - if(x < static_cast(1)) - return static_cast(0); - return log(x + sqrt(x * x - static_cast(1))); - } -# endif - - template - GLM_FUNC_QUALIFIER vec acosh(vec const& v) - { - return detail::functor1::call(acosh, v); - } - - // atanh -# if GLM_HAS_CXX11_STL - using std::atanh; -# else - template - GLM_FUNC_QUALIFIER genType atanh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'atanh' only accept floating-point input"); - - if(std::abs(x) >= static_cast(1)) - return 0; - return static_cast(0.5) * log((static_cast(1) + x) / (static_cast(1) - x)); - } -# endif - - template - GLM_FUNC_QUALIFIER vec atanh(vec const& v) - { - return detail::functor1::call(atanh, v); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_trigonometric_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_trigonometric_simd.inl b/third_party/glm/detail/func_trigonometric_simd.inl deleted file mode 100755 index e69de29..0000000 diff --git a/third_party/glm/detail/func_vector_relational.inl b/third_party/glm/detail/func_vector_relational.inl deleted file mode 100755 index 80c9e87..0000000 --- a/third_party/glm/detail/func_vector_relational.inl +++ /dev/null @@ -1,87 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] < y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] <= y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] > y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] >= y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] == y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 
0; i < L; ++i) - Result[i] = x[i] != y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool any(vec const& v) - { - bool Result = false; - for(length_t i = 0; i < L; ++i) - Result = Result || v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool all(vec const& v) - { - bool Result = true; - for(length_t i = 0; i < L; ++i) - Result = Result && v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec not_(vec const& v) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = !v[i]; - return Result; - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_vector_relational_simd.inl" -#endif diff --git a/third_party/glm/detail/func_vector_relational_simd.inl b/third_party/glm/detail/func_vector_relational_simd.inl deleted file mode 100755 index fd0fe8b..0000000 --- a/third_party/glm/detail/func_vector_relational_simd.inl +++ /dev/null @@ -1,6 +0,0 @@ -namespace glm{ -namespace detail -{ - -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/glm.cpp b/third_party/glm/detail/glm.cpp deleted file mode 100755 index e0755bd..0000000 --- a/third_party/glm/detail/glm.cpp +++ /dev/null @@ -1,263 +0,0 @@ -/// @ref core -/// @file glm/glm.cpp - -#ifndef GLM_ENABLE_EXPERIMENTAL -#define GLM_ENABLE_EXPERIMENTAL -#endif -#include -#include -#include -#include -#include -#include - -namespace glm -{ -// tvec1 type explicit instantiation -template struct vec<1, uint8, lowp>; -template struct vec<1, uint16, lowp>; -template struct vec<1, uint32, lowp>; -template struct vec<1, uint64, lowp>; -template struct vec<1, int8, lowp>; -template struct vec<1, int16, lowp>; -template struct vec<1, int32, lowp>; -template struct vec<1, int64, lowp>; -template struct vec<1, float32, lowp>; -template struct vec<1, float64, lowp>; - -template struct vec<1, uint8, mediump>; -template struct vec<1, uint16, mediump>; -template struct vec<1, uint32, mediump>; -template struct vec<1, uint64, mediump>; -template struct vec<1, int8, mediump>; -template struct vec<1, int16, mediump>; -template struct vec<1, int32, mediump>; -template struct vec<1, int64, mediump>; -template struct vec<1, float32, mediump>; -template struct vec<1, float64, mediump>; - -template struct vec<1, uint8, highp>; -template struct vec<1, uint16, highp>; -template struct vec<1, uint32, highp>; -template struct vec<1, uint64, highp>; -template struct vec<1, int8, highp>; -template struct vec<1, int16, highp>; -template struct vec<1, int32, highp>; -template struct vec<1, int64, highp>; -template struct vec<1, float32, highp>; -template struct vec<1, float64, highp>; - -// tvec2 type explicit instantiation -template struct vec<2, uint8, lowp>; -template struct vec<2, uint16, lowp>; -template struct vec<2, uint32, lowp>; -template struct vec<2, uint64, lowp>; -template struct vec<2, int8, lowp>; -template struct vec<2, int16, lowp>; -template struct vec<2, int32, lowp>; -template struct vec<2, int64, lowp>; -template struct vec<2, float32, lowp>; -template struct vec<2, float64, lowp>; - -template struct vec<2, uint8, mediump>; -template struct vec<2, uint16, mediump>; -template struct vec<2, uint32, mediump>; -template struct vec<2, uint64, mediump>; -template struct vec<2, int8, mediump>; -template struct vec<2, int16, mediump>; -template struct vec<2, int32, mediump>; -template struct vec<2, int64, mediump>; -template struct vec<2, float32, mediump>; -template struct vec<2, float64, mediump>; - -template struct vec<2, uint8, 
highp>; -template struct vec<2, uint16, highp>; -template struct vec<2, uint32, highp>; -template struct vec<2, uint64, highp>; -template struct vec<2, int8, highp>; -template struct vec<2, int16, highp>; -template struct vec<2, int32, highp>; -template struct vec<2, int64, highp>; -template struct vec<2, float32, highp>; -template struct vec<2, float64, highp>; - -// tvec3 type explicit instantiation -template struct vec<3, uint8, lowp>; -template struct vec<3, uint16, lowp>; -template struct vec<3, uint32, lowp>; -template struct vec<3, uint64, lowp>; -template struct vec<3, int8, lowp>; -template struct vec<3, int16, lowp>; -template struct vec<3, int32, lowp>; -template struct vec<3, int64, lowp>; -template struct vec<3, float32, lowp>; -template struct vec<3, float64, lowp>; - -template struct vec<3, uint8, mediump>; -template struct vec<3, uint16, mediump>; -template struct vec<3, uint32, mediump>; -template struct vec<3, uint64, mediump>; -template struct vec<3, int8, mediump>; -template struct vec<3, int16, mediump>; -template struct vec<3, int32, mediump>; -template struct vec<3, int64, mediump>; -template struct vec<3, float32, mediump>; -template struct vec<3, float64, mediump>; - -template struct vec<3, uint8, highp>; -template struct vec<3, uint16, highp>; -template struct vec<3, uint32, highp>; -template struct vec<3, uint64, highp>; -template struct vec<3, int8, highp>; -template struct vec<3, int16, highp>; -template struct vec<3, int32, highp>; -template struct vec<3, int64, highp>; -template struct vec<3, float32, highp>; -template struct vec<3, float64, highp>; - -// tvec4 type explicit instantiation -template struct vec<4, uint8, lowp>; -template struct vec<4, uint16, lowp>; -template struct vec<4, uint32, lowp>; -template struct vec<4, uint64, lowp>; -template struct vec<4, int8, lowp>; -template struct vec<4, int16, lowp>; -template struct vec<4, int32, lowp>; -template struct vec<4, int64, lowp>; -template struct vec<4, float32, lowp>; -template struct vec<4, float64, lowp>; - -template struct vec<4, uint8, mediump>; -template struct vec<4, uint16, mediump>; -template struct vec<4, uint32, mediump>; -template struct vec<4, uint64, mediump>; -template struct vec<4, int8, mediump>; -template struct vec<4, int16, mediump>; -template struct vec<4, int32, mediump>; -template struct vec<4, int64, mediump>; -template struct vec<4, float32, mediump>; -template struct vec<4, float64, mediump>; - -template struct vec<4, uint8, highp>; -template struct vec<4, uint16, highp>; -template struct vec<4, uint32, highp>; -template struct vec<4, uint64, highp>; -template struct vec<4, int8, highp>; -template struct vec<4, int16, highp>; -template struct vec<4, int32, highp>; -template struct vec<4, int64, highp>; -template struct vec<4, float32, highp>; -template struct vec<4, float64, highp>; - -// tmat2x2 type explicit instantiation -template struct mat<2, 2, float32, lowp>; -template struct mat<2, 2, float64, lowp>; - -template struct mat<2, 2, float32, mediump>; -template struct mat<2, 2, float64, mediump>; - -template struct mat<2, 2, float32, highp>; -template struct mat<2, 2, float64, highp>; - -// tmat2x3 type explicit instantiation -template struct mat<2, 3, float32, lowp>; -template struct mat<2, 3, float64, lowp>; - -template struct mat<2, 3, float32, mediump>; -template struct mat<2, 3, float64, mediump>; - -template struct mat<2, 3, float32, highp>; -template struct mat<2, 3, float64, highp>; - -// tmat2x4 type explicit instantiation -template struct mat<2, 4, float32, 
lowp>; -template struct mat<2, 4, float64, lowp>; - -template struct mat<2, 4, float32, mediump>; -template struct mat<2, 4, float64, mediump>; - -template struct mat<2, 4, float32, highp>; -template struct mat<2, 4, float64, highp>; - -// tmat3x2 type explicit instantiation -template struct mat<3, 2, float32, lowp>; -template struct mat<3, 2, float64, lowp>; - -template struct mat<3, 2, float32, mediump>; -template struct mat<3, 2, float64, mediump>; - -template struct mat<3, 2, float32, highp>; -template struct mat<3, 2, float64, highp>; - -// tmat3x3 type explicit instantiation -template struct mat<3, 3, float32, lowp>; -template struct mat<3, 3, float64, lowp>; - -template struct mat<3, 3, float32, mediump>; -template struct mat<3, 3, float64, mediump>; - -template struct mat<3, 3, float32, highp>; -template struct mat<3, 3, float64, highp>; - -// tmat3x4 type explicit instantiation -template struct mat<3, 4, float32, lowp>; -template struct mat<3, 4, float64, lowp>; - -template struct mat<3, 4, float32, mediump>; -template struct mat<3, 4, float64, mediump>; - -template struct mat<3, 4, float32, highp>; -template struct mat<3, 4, float64, highp>; - -// tmat4x2 type explicit instantiation -template struct mat<4, 2, float32, lowp>; -template struct mat<4, 2, float64, lowp>; - -template struct mat<4, 2, float32, mediump>; -template struct mat<4, 2, float64, mediump>; - -template struct mat<4, 2, float32, highp>; -template struct mat<4, 2, float64, highp>; - -// tmat4x3 type explicit instantiation -template struct mat<4, 3, float32, lowp>; -template struct mat<4, 3, float64, lowp>; - -template struct mat<4, 3, float32, mediump>; -template struct mat<4, 3, float64, mediump>; - -template struct mat<4, 3, float32, highp>; -template struct mat<4, 3, float64, highp>; - -// tmat4x4 type explicit instantiation -template struct mat<4, 4, float32, lowp>; -template struct mat<4, 4, float64, lowp>; - -template struct mat<4, 4, float32, mediump>; -template struct mat<4, 4, float64, mediump>; - -template struct mat<4, 4, float32, highp>; -template struct mat<4, 4, float64, highp>; - -// tquat type explicit instantiation -template struct qua; -template struct qua; - -template struct qua; -template struct qua; - -template struct qua; -template struct qua; - -//tdualquat type explicit instantiation -template struct tdualquat; -template struct tdualquat; - -template struct tdualquat; -template struct tdualquat; - -template struct tdualquat; -template struct tdualquat; - -}//namespace glm - diff --git a/third_party/glm/detail/qualifier.hpp b/third_party/glm/detail/qualifier.hpp deleted file mode 100755 index b6c9df0..0000000 --- a/third_party/glm/detail/qualifier.hpp +++ /dev/null @@ -1,230 +0,0 @@ -#pragma once - -#include "setup.hpp" - -namespace glm -{ - /// Qualify GLM types in term of alignment (packed, aligned) and precision in term of ULPs (lowp, mediump, highp) - enum qualifier - { - packed_highp, ///< Typed data is tightly packed in memory and operations are executed with high precision in term of ULPs - packed_mediump, ///< Typed data is tightly packed in memory and operations are executed with medium precision in term of ULPs for higher performance - packed_lowp, ///< Typed data is tightly packed in memory and operations are executed with low precision in term of ULPs to maximize performance - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - aligned_highp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs - 
aligned_mediump, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs for higher performance - aligned_lowp, // ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs to maximize performance - aligned = aligned_highp, ///< By default aligned qualifier is also high precision -# endif - - highp = packed_highp, ///< By default highp qualifier is also packed - mediump = packed_mediump, ///< By default mediump qualifier is also packed - lowp = packed_lowp, ///< By default lowp qualifier is also packed - packed = packed_highp, ///< By default packed qualifier is also high precision - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE && defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) - defaultp = aligned_highp -# else - defaultp = highp -# endif - }; - - typedef qualifier precision; - - template struct vec; - template struct mat; - template struct qua; - -# if GLM_HAS_TEMPLATE_ALIASES - template using tvec1 = vec<1, T, Q>; - template using tvec2 = vec<2, T, Q>; - template using tvec3 = vec<3, T, Q>; - template using tvec4 = vec<4, T, Q>; - template using tmat2x2 = mat<2, 2, T, Q>; - template using tmat2x3 = mat<2, 3, T, Q>; - template using tmat2x4 = mat<2, 4, T, Q>; - template using tmat3x2 = mat<3, 2, T, Q>; - template using tmat3x3 = mat<3, 3, T, Q>; - template using tmat3x4 = mat<3, 4, T, Q>; - template using tmat4x2 = mat<4, 2, T, Q>; - template using tmat4x3 = mat<4, 3, T, Q>; - template using tmat4x4 = mat<4, 4, T, Q>; - template using tquat = qua; -# endif - -namespace detail -{ - template - struct is_aligned - { - static const bool value = false; - }; - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - struct is_aligned - { - static const bool value = true; - }; - - template<> - struct is_aligned - { - static const bool value = true; - }; - - template<> - struct is_aligned - { - static const bool value = true; - }; -# endif - - template - struct storage - { - typedef struct type { - T data[L]; - } type; - }; - -# if GLM_HAS_ALIGNOF - template - struct storage - { - typedef struct alignas(L * sizeof(T)) type { - T data[L]; - } type; - }; - - template - struct storage<3, T, true> - { - typedef struct alignas(4 * sizeof(T)) type { - T data[4]; - } type; - }; -# endif - -# if GLM_ARCH & GLM_ARCH_SSE2_BIT - template<> - struct storage<4, float, true> - { - typedef glm_f32vec4 type; - }; - - template<> - struct storage<4, int, true> - { - typedef glm_i32vec4 type; - }; - - template<> - struct storage<4, unsigned int, true> - { - typedef glm_u32vec4 type; - }; - - template<> - struct storage<2, double, true> - { - typedef glm_f64vec2 type; - }; - - template<> - struct storage<2, detail::int64, true> - { - typedef glm_i64vec2 type; - }; - - template<> - struct storage<2, detail::uint64, true> - { - typedef glm_u64vec2 type; - }; -# endif - -# if (GLM_ARCH & GLM_ARCH_AVX_BIT) - template<> - struct storage<4, double, true> - { - typedef glm_f64vec4 type; - }; -# endif - -# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) - template<> - struct storage<4, detail::int64, true> - { - typedef glm_i64vec4 type; - }; - - template<> - struct storage<4, detail::uint64, true> - { - typedef glm_u64vec4 type; - }; -# endif - -# if GLM_ARCH & GLM_ARCH_NEON_BIT - template<> - struct storage<4, float, true> - { - typedef glm_f32vec4 type; - }; - - template<> - struct storage<4, int, true> - { - typedef glm_i32vec4 type; - }; - - template<> - struct storage<4, unsigned int, 
true> - { - typedef glm_u32vec4 type; - }; -# endif - - enum genTypeEnum - { - GENTYPE_VEC, - GENTYPE_MAT, - GENTYPE_QUAT - }; - - template - struct genTypeTrait - {}; - - template - struct genTypeTrait > - { - static const genTypeEnum GENTYPE = GENTYPE_MAT; - }; - - template - struct init_gentype - { - }; - - template - struct init_gentype - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() - { - return genType(1, 0, 0, 0); - } - }; - - template - struct init_gentype - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() - { - return genType(1); - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/setup.hpp b/third_party/glm/detail/setup.hpp deleted file mode 100755 index 07db656..0000000 --- a/third_party/glm/detail/setup.hpp +++ /dev/null @@ -1,1135 +0,0 @@ -#ifndef GLM_SETUP_INCLUDED - -#include -#include - -#define GLM_VERSION_MAJOR 0 -#define GLM_VERSION_MINOR 9 -#define GLM_VERSION_PATCH 9 -#define GLM_VERSION_REVISION 7 -#define GLM_VERSION 997 -#define GLM_VERSION_MESSAGE "GLM: version 0.9.9.7" - -#define GLM_SETUP_INCLUDED GLM_VERSION - -/////////////////////////////////////////////////////////////////////////////////// -// Active states - -#define GLM_DISABLE 0 -#define GLM_ENABLE 1 - -/////////////////////////////////////////////////////////////////////////////////// -// Messages - -#if defined(GLM_FORCE_MESSAGES) -# define GLM_MESSAGES GLM_ENABLE -#else -# define GLM_MESSAGES GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Detect the platform - -#include "../simd/platform.h" - -/////////////////////////////////////////////////////////////////////////////////// -// Build model - -#if defined(_M_ARM64) || defined(__LP64__) || defined(_M_X64) || defined(__ppc64__) || defined(__x86_64__) -# define GLM_MODEL GLM_MODEL_64 -#elif defined(__i386__) || defined(__ppc__) || defined(__ILP32__) || defined(_M_ARM) -# define GLM_MODEL GLM_MODEL_32 -#else -# define GLM_MODEL GLM_MODEL_32 -#endif// - -#if !defined(GLM_MODEL) && GLM_COMPILER != 0 -# error "GLM_MODEL undefined, your compiler may not be supported by GLM. Add #define GLM_MODEL 0 to ignore this message." 
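The preprocessor cannot evaluate sizeof, which is why the build model above has to be inferred from compiler-specific macros; in plain C++ the same 32/64-bit decision can be made at compile time (a standalone sketch assuming a flat pointer model; PointerModel is an illustrative name):

#include <cstdint>

constexpr int PointerModel = (sizeof(void*) == 8) ? 64 : 32;

static_assert(PointerModel == 64 || PointerModel == 32, "unexpected pointer size");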
-#endif//GLM_MODEL - -/////////////////////////////////////////////////////////////////////////////////// -// C++ Version - -// User defines: GLM_FORCE_CXX98, GLM_FORCE_CXX03, GLM_FORCE_CXX11, GLM_FORCE_CXX14, GLM_FORCE_CXX17, GLM_FORCE_CXX2A - -#define GLM_LANG_CXX98_FLAG (1 << 1) -#define GLM_LANG_CXX03_FLAG (1 << 2) -#define GLM_LANG_CXX0X_FLAG (1 << 3) -#define GLM_LANG_CXX11_FLAG (1 << 4) -#define GLM_LANG_CXX14_FLAG (1 << 5) -#define GLM_LANG_CXX17_FLAG (1 << 6) -#define GLM_LANG_CXX2A_FLAG (1 << 7) -#define GLM_LANG_CXXMS_FLAG (1 << 8) -#define GLM_LANG_CXXGNU_FLAG (1 << 9) - -#define GLM_LANG_CXX98 GLM_LANG_CXX98_FLAG -#define GLM_LANG_CXX03 (GLM_LANG_CXX98 | GLM_LANG_CXX03_FLAG) -#define GLM_LANG_CXX0X (GLM_LANG_CXX03 | GLM_LANG_CXX0X_FLAG) -#define GLM_LANG_CXX11 (GLM_LANG_CXX0X | GLM_LANG_CXX11_FLAG) -#define GLM_LANG_CXX14 (GLM_LANG_CXX11 | GLM_LANG_CXX14_FLAG) -#define GLM_LANG_CXX17 (GLM_LANG_CXX14 | GLM_LANG_CXX17_FLAG) -#define GLM_LANG_CXX2A (GLM_LANG_CXX17 | GLM_LANG_CXX2A_FLAG) -#define GLM_LANG_CXXMS GLM_LANG_CXXMS_FLAG -#define GLM_LANG_CXXGNU GLM_LANG_CXXGNU_FLAG - -#if (defined(_MSC_EXTENSIONS)) -# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG -#elif ((GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)) -# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG -#else -# define GLM_LANG_EXT 0 -#endif - -#if (defined(GLM_FORCE_CXX_UNKNOWN)) -# define GLM_LANG 0 -#elif defined(GLM_FORCE_CXX2A) -# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX17) -# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX14) -# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX11) -# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX03) -# define GLM_LANG (GLM_LANG_CXX03 | GLM_LANG_EXT) -#elif defined(GLM_FORCE_CXX98) -# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) -#else -# if GLM_COMPILER & GLM_COMPILER_VC && defined(_MSVC_LANG) -# if GLM_COMPILER >= GLM_COMPILER_VC15_7 -# define GLM_LANG_PLATFORM _MSVC_LANG -# elif GLM_COMPILER >= GLM_COMPILER_VC15 -# if _MSVC_LANG > 201402L -# define GLM_LANG_PLATFORM 201402L -# else -# define GLM_LANG_PLATFORM _MSVC_LANG -# endif -# else -# define GLM_LANG_PLATFORM 0 -# endif -# else -# define GLM_LANG_PLATFORM 0 -# endif - -# if __cplusplus > 201703L || GLM_LANG_PLATFORM > 201703L -# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT) -# elif __cplusplus == 201703L || GLM_LANG_PLATFORM == 201703L -# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) -# elif __cplusplus == 201402L || __cplusplus == 201500L || GLM_LANG_PLATFORM == 201402L -# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) -# elif __cplusplus == 201103L || GLM_LANG_PLATFORM == 201103L -# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) -# elif defined(__INTEL_CXX11_MODE__) || defined(_MSC_VER) || defined(__GXX_EXPERIMENTAL_CXX0X__) -# define GLM_LANG (GLM_LANG_CXX0X | GLM_LANG_EXT) -# elif __cplusplus == 199711L -# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) -# else -# define GLM_LANG (0 | GLM_LANG_EXT) -# endif -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Has of C++ features - -// http://clang.llvm.org/cxx_status.html -// http://gcc.gnu.org/projects/cxx0x.html -// http://msdn.microsoft.com/en-us/library/vstudio/hh567368(v=vs.120).aspx - -// Android has multiple STLs but C++11 STL 
detection doesn't always work #284 #564 -#if GLM_PLATFORM == GLM_PLATFORM_ANDROID && !defined(GLM_LANG_STL11_FORCED) -# define GLM_HAS_CXX11_STL 0 -#elif GLM_COMPILER & GLM_COMPILER_CLANG -# if (defined(_LIBCPP_VERSION) || (GLM_LANG & GLM_LANG_CXX11_FLAG) || defined(GLM_LANG_STL11_FORCED)) -# define GLM_HAS_CXX11_STL 1 -# else -# define GLM_HAS_CXX11_STL 0 -# endif -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_CXX11_STL 1 -#else -# define GLM_HAS_CXX11_STL ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC48)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_PLATFORM != GLM_PLATFORM_WINDOWS) && (GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)))) -#endif - -// N1720 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_STATIC_ASSERT __has_feature(cxx_static_assert) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_STATIC_ASSERT 1 -#else -# define GLM_HAS_STATIC_ASSERT ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC)))) -#endif - -// N1988 -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_EXTENDED_INTEGER_TYPE 1 -#else -# define GLM_HAS_EXTENDED_INTEGER_TYPE (\ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CLANG))) -#endif - -// N2672 Initializer lists http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_INITIALIZER_LISTS __has_feature(cxx_generalized_initializers) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_INITIALIZER_LISTS 1 -#else -# define GLM_HAS_INITIALIZER_LISTS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2544 Unrestricted unions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_UNRESTRICTED_UNIONS __has_feature(cxx_unrestricted_unions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_UNRESTRICTED_UNIONS 1 -#else -# define GLM_HAS_UNRESTRICTED_UNIONS (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - (GLM_COMPILER & GLM_COMPILER_VC) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA))) -#endif - -// N2346 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_DEFAULTED_FUNCTIONS __has_feature(cxx_defaulted_functions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_DEFAULTED_FUNCTIONS 1 -#else -# define GLM_HAS_DEFAULTED_FUNCTIONS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - (GLM_COMPILER & GLM_COMPILER_CUDA))) -#endif - -// N2118 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_RVALUE_REFERENCES __has_feature(cxx_rvalue_references) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_RVALUE_REFERENCES 1 -#else -# define GLM_HAS_RVALUE_REFERENCES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2437 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf -#if GLM_COMPILER & 
GLM_COMPILER_CLANG -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS __has_feature(cxx_explicit_conversions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS 1 -#else -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2258 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_TEMPLATE_ALIASES __has_feature(cxx_alias_templates) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_TEMPLATE_ALIASES 1 -#else -# define GLM_HAS_TEMPLATE_ALIASES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2930 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_RANGE_FOR __has_feature(cxx_range_for) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_RANGE_FOR 1 -#else -# define GLM_HAS_RANGE_FOR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2341 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_ALIGNOF __has_feature(cxx_alignas) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_ALIGNOF 1 -#else -# define GLM_HAS_ALIGNOF ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf -// N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html -#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr... 
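Newer toolchains expose the same capability information through standard feature-test macros; a sketch of equivalent detection, assuming a post-C++14 compiler (DEMO_CONSTEXPR is an illustrative name; GLM keeps the hand-rolled compiler ladder above because it still supports pre-C++11 builds):

#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304L
#	define DEMO_CONSTEXPR constexpr
#else
#	define DEMO_CONSTEXPR
#endif

DEMO_CONSTEXPR int Square(int x) { return x * x; }

#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304L
static_assert(Square(3) == 9, "evaluated at compile time when supported");
#endif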
-# define GLM_HAS_CONSTEXPR 0 -#elif (GLM_COMPILER & GLM_COMPILER_CLANG) -# define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr) -#elif (GLM_LANG & GLM_LANG_CXX14_FLAG) -# define GLM_HAS_CONSTEXPR 1 -#else -# define GLM_HAS_CONSTEXPR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && GLM_HAS_INITIALIZER_LISTS && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL17)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)))) -#endif - -#if GLM_HAS_CONSTEXPR -# define GLM_CONSTEXPR constexpr -#else -# define GLM_CONSTEXPR -#endif - -// -#if GLM_HAS_CONSTEXPR -# if (GLM_COMPILER & GLM_COMPILER_CLANG) -# if __has_feature(cxx_if_constexpr) -# define GLM_HAS_IF_CONSTEXPR 1 -# else -# define GLM_HAS_IF_CONSTEXPR 0 -# endif -# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) -# define GLM_HAS_IF_CONSTEXPR 1 -# else -# define GLM_HAS_IF_CONSTEXPR 0 -# endif -#else -# define GLM_HAS_IF_CONSTEXPR 0 -#endif - -#if GLM_HAS_IF_CONSTEXPR -# define GLM_IF_CONSTEXPR if constexpr -#else -# define GLM_IF_CONSTEXPR if -#endif - -// -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_ASSIGNABLE 1 -#else -# define GLM_HAS_ASSIGNABLE ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ - ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49)))) -#endif - -// -#define GLM_HAS_TRIVIAL_QUERIES 0 - -// -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_MAKE_SIGNED 1 -#else -# define GLM_HAS_MAKE_SIGNED ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// -#if defined(GLM_FORCE_INTRINSICS) -# define GLM_HAS_BITSCAN_WINDOWS ((GLM_PLATFORM & GLM_PLATFORM_WINDOWS) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14) && (GLM_ARCH & GLM_ARCH_X86_BIT)))) -#else -# define GLM_HAS_BITSCAN_WINDOWS 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// OpenMP -#ifdef _OPENMP -# if GLM_COMPILER & GLM_COMPILER_GCC -# if GLM_COMPILER >= GLM_COMPILER_GCC61 -# define GLM_HAS_OPENMP 45 -# elif GLM_COMPILER >= GLM_COMPILER_GCC49 -# define GLM_HAS_OPENMP 40 -# elif GLM_COMPILER >= GLM_COMPILER_GCC47 -# define GLM_HAS_OPENMP 31 -# else -# define GLM_HAS_OPENMP 0 -# endif -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# if GLM_COMPILER >= GLM_COMPILER_CLANG38 -# define GLM_HAS_OPENMP 31 -# else -# define GLM_HAS_OPENMP 0 -# endif -# elif GLM_COMPILER & GLM_COMPILER_VC -# define GLM_HAS_OPENMP 20 -# elif GLM_COMPILER & GLM_COMPILER_INTEL -# if GLM_COMPILER >= GLM_COMPILER_INTEL16 -# define GLM_HAS_OPENMP 40 -# else -# define GLM_HAS_OPENMP 0 -# endif -# else -# define GLM_HAS_OPENMP 0 -# endif -#else -# define GLM_HAS_OPENMP 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// nullptr - -#if GLM_LANG & GLM_LANG_CXX0X_FLAG -# define GLM_CONFIG_NULLPTR GLM_ENABLE -#else -# define GLM_CONFIG_NULLPTR GLM_DISABLE -#endif - -#if GLM_CONFIG_NULLPTR == GLM_ENABLE -# define GLM_NULLPTR nullptr -#else -# define GLM_NULLPTR 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Static assert - -#if GLM_HAS_STATIC_ASSERT -# define GLM_STATIC_ASSERT(x, message) static_assert(x, message) -#elif GLM_COMPILER & GLM_COMPILER_VC -# define GLM_STATIC_ASSERT(x, message) typedef char 
__CASSERT__##__LINE__[(x) ? 1 : -1] -#else -# define GLM_STATIC_ASSERT(x, message) assert(x) -#endif//GLM_LANG - -/////////////////////////////////////////////////////////////////////////////////// -// Qualifiers - -#if GLM_COMPILER & GLM_COMPILER_CUDA -# define GLM_CUDA_FUNC_DEF __device__ __host__ -# define GLM_CUDA_FUNC_DECL __device__ __host__ -#else -# define GLM_CUDA_FUNC_DEF -# define GLM_CUDA_FUNC_DECL -#endif - -#if defined(GLM_FORCE_INLINE) -# if GLM_COMPILER & GLM_COMPILER_VC -# define GLM_INLINE __forceinline -# define GLM_NEVER_INLINE __declspec((noinline)) -# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) -# define GLM_INLINE inline __attribute__((__always_inline__)) -# define GLM_NEVER_INLINE __attribute__((__noinline__)) -# elif GLM_COMPILER & GLM_COMPILER_CUDA -# define GLM_INLINE __forceinline__ -# define GLM_NEVER_INLINE __noinline__ -# else -# define GLM_INLINE inline -# define GLM_NEVER_INLINE -# endif//GLM_COMPILER -#else -# define GLM_INLINE inline -# define GLM_NEVER_INLINE -#endif//defined(GLM_FORCE_INLINE) - -#define GLM_FUNC_DECL GLM_CUDA_FUNC_DECL -#define GLM_FUNC_QUALIFIER GLM_CUDA_FUNC_DEF GLM_INLINE - -/////////////////////////////////////////////////////////////////////////////////// -// Swizzle operators - -// User defines: GLM_FORCE_SWIZZLE - -#define GLM_SWIZZLE_DISABLED 0 -#define GLM_SWIZZLE_OPERATOR 1 -#define GLM_SWIZZLE_FUNCTION 2 - -#if defined(GLM_FORCE_XYZW_ONLY) -# undef GLM_FORCE_SWIZZLE -#endif - -#if defined(GLM_SWIZZLE) -# pragma message("GLM: GLM_SWIZZLE is deprecated, use GLM_FORCE_SWIZZLE instead.") -# define GLM_FORCE_SWIZZLE -#endif - -#if defined(GLM_FORCE_SWIZZLE) && (GLM_LANG & GLM_LANG_CXXMS_FLAG) -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_OPERATOR -#elif defined(GLM_FORCE_SWIZZLE) -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_FUNCTION -#else -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_DISABLED -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Allows using not basic types as genType - -// #define GLM_FORCE_UNRESTRICTED_GENTYPE - -#ifdef GLM_FORCE_UNRESTRICTED_GENTYPE -# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_ENABLE -#else -# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Clip control, define GLM_FORCE_DEPTH_ZERO_TO_ONE before including GLM -// to use a clip space between 0 to 1. -// Coordinate system, define GLM_FORCE_LEFT_HANDED before including GLM -// to use left handed coordinate system by default. 
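[editor's note: the GLM_STATIC_ASSERT fallback deleted above relies on the classic pre-C++11 idiom of declaring a char array whose size becomes -1 when the condition is false, which is ill-formed and stops compilation. A minimal standalone sketch of the idiom, with hypothetical macro and typedef names that are not part of GLM:]

// Sketch of the negative-array-size trick used as a static_assert fallback.
// A true condition yields char[1] (legal); a false one yields char[-1],
// which every compiler rejects at compile time. Names are hypothetical.
#define MY_STATIC_ASSERT(x) typedef char my_static_assert_failed[(x) ? 1 : -1]

MY_STATIC_ASSERT(sizeof(int) >= 2);    // compiles: array of size 1
// MY_STATIC_ASSERT(sizeof(int) == 1); // would not compile: char[-1]

int main() { return 0; }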
-
-#define GLM_CLIP_CONTROL_ZO_BIT (1 << 0) // ZERO_TO_ONE
-#define GLM_CLIP_CONTROL_NO_BIT (1 << 1) // NEGATIVE_ONE_TO_ONE
-#define GLM_CLIP_CONTROL_LH_BIT (1 << 2) // LEFT_HANDED, For DirectX, Metal, Vulkan
-#define GLM_CLIP_CONTROL_RH_BIT (1 << 3) // RIGHT_HANDED, For OpenGL, default in GLM
-
-#define GLM_CLIP_CONTROL_LH_ZO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_ZO_BIT)
-#define GLM_CLIP_CONTROL_LH_NO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_NO_BIT)
-#define GLM_CLIP_CONTROL_RH_ZO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_ZO_BIT)
-#define GLM_CLIP_CONTROL_RH_NO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_NO_BIT)
-
-#ifdef GLM_FORCE_DEPTH_ZERO_TO_ONE
-#	ifdef GLM_FORCE_LEFT_HANDED
-#		define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_ZO
-#	else
-#		define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_ZO
-#	endif
-#else
-#	ifdef GLM_FORCE_LEFT_HANDED
-#		define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_NO
-#	else
-#		define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_NO
-#	endif
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Qualifiers
-
-#if (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))
-#	define GLM_DEPRECATED __declspec(deprecated)
-#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name
-#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG | GLM_COMPILER_INTEL)
-#	define GLM_DEPRECATED __attribute__((__deprecated__))
-#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment)))
-#elif GLM_COMPILER & GLM_COMPILER_CUDA
-#	define GLM_DEPRECATED
-#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __align__(x)
-#else
-#	define GLM_DEPRECATED
-#	define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-
-#ifdef GLM_FORCE_EXPLICIT_CTOR
-#	define GLM_EXPLICIT explicit
-#else
-#	define GLM_EXPLICIT
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// SYCL
-
-#if GLM_COMPILER==GLM_COMPILER_SYCL
-
-#include <CL/sycl.hpp>
-#include <limits>
-
-namespace glm {
-namespace std {
-	// Import SYCL's functions into the namespace glm::std to force their usages.
-	// It's important to use the math built-in function (sin, exp, ...)
-	// of SYCL instead the std ones.
-	using namespace cl::sycl;
-
-	///////////////////////////////////////////////////////////////////////////////
-	// Import some "harmless" std's stuffs used by glm into
-	// the new glm::std namespace.
-	template<typename T>
-	using numeric_limits = ::std::numeric_limits<T>;
-
-	using ::std::size_t;
-
-	using ::std::uint8_t;
-	using ::std::uint16_t;
-	using ::std::uint32_t;
-	using ::std::uint64_t;
-
-	using ::std::int8_t;
-	using ::std::int16_t;
-	using ::std::int32_t;
-	using ::std::int64_t;
-
-	using ::std::make_unsigned;
-	///////////////////////////////////////////////////////////////////////////////
-} //namespace std
-} //namespace glm
-
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-
-///////////////////////////////////////////////////////////////////////////////////
-// Length type: all length functions returns a length_t type.
-// When GLM_FORCE_SIZE_T_LENGTH is defined, length_t is a typedef of size_t otherwise
-// length_t is a typedef of int like GLSL defines it.
-
-#define GLM_LENGTH_INT 1
-#define GLM_LENGTH_SIZE_T 2
-
-#ifdef GLM_FORCE_SIZE_T_LENGTH
-#	define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_SIZE_T
-#else
-#	define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_INT
-#endif
-
-namespace glm
-{
-	using std::size_t;
-#	if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T
-		typedef size_t length_t;
-#	else
-		typedef int length_t;
-#	endif
-}//namespace glm
-
-///////////////////////////////////////////////////////////////////////////////////
-// constexpr
-
-#if GLM_HAS_CONSTEXPR
-#	define GLM_CONFIG_CONSTEXP GLM_ENABLE
-
-	namespace glm
-	{
-		template<typename T, std::size_t N>
-		constexpr std::size_t countof(T const (&)[N])
-		{
-			return N;
-		}
-	}//namespace glm
-#	define GLM_COUNTOF(arr) glm::countof(arr)
-#elif defined(_MSC_VER)
-#	define GLM_CONFIG_CONSTEXP GLM_DISABLE
-
-#	define GLM_COUNTOF(arr) _countof(arr)
-#else
-#	define GLM_CONFIG_CONSTEXP GLM_DISABLE
-
-#	define GLM_COUNTOF(arr) sizeof(arr) / sizeof(arr[0])
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// uint
-
-namespace glm{
-namespace detail
-{
-	template<typename T>
-	struct is_int
-	{
-		enum test {value = 0};
-	};
-
-	template<>
-	struct is_int<unsigned int>
-	{
-		enum test {value = ~0};
-	};
-
-	template<>
-	struct is_int<signed int>
-	{
-		enum test {value = ~0};
-	};
-}//namespace detail
-
-	typedef unsigned int uint;
-}//namespace glm
-
-///////////////////////////////////////////////////////////////////////////////////
-// 64-bit int
-
-#if GLM_HAS_EXTENDED_INTEGER_TYPE
-#	include <cstdint>
-#endif
-
-namespace glm{
-namespace detail
-{
-#	if GLM_HAS_EXTENDED_INTEGER_TYPE
-		typedef std::uint64_t uint64;
-		typedef std::int64_t int64;
-#	elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) // C99 detected, 64 bit types available
-		typedef uint64_t uint64;
-		typedef int64_t int64;
-#	elif GLM_COMPILER & GLM_COMPILER_VC
-		typedef unsigned __int64 uint64;
-		typedef signed __int64 int64;
-#	elif GLM_COMPILER & GLM_COMPILER_GCC
-#		pragma GCC diagnostic ignored "-Wlong-long"
-		__extension__ typedef unsigned long long uint64;
-		__extension__ typedef signed long long int64;
-#	elif (GLM_COMPILER & GLM_COMPILER_CLANG)
-#		pragma clang diagnostic ignored "-Wc++11-long-long"
-		typedef unsigned long long uint64;
-		typedef signed long long int64;
-#	else//unknown compiler
-		typedef unsigned long long uint64;
-		typedef signed long long int64;
-#	endif
-}//namespace detail
-}//namespace glm
-
-///////////////////////////////////////////////////////////////////////////////////
-// make_unsigned
-
-#if GLM_HAS_MAKE_SIGNED
-#	include <type_traits>
-
-namespace glm{
-namespace detail
-{
-	using std::make_unsigned;
-}//namespace detail
-}//namespace glm
-
-#else
-
-namespace glm{
-namespace detail
-{
-	template<typename T>
-	struct make_unsigned
-	{};
-
-	template<>
-	struct make_unsigned<char>
-	{
-		typedef unsigned char type;
-	};
-
-	template<>
-	struct make_unsigned<signed char>
-	{
-		typedef unsigned char type;
-	};
-
-	template<>
-	struct make_unsigned<short>
-	{
-		typedef unsigned short type;
-	};
-
-	template<>
-	struct make_unsigned<int>
-	{
-		typedef unsigned int type;
-	};
-
-	template<>
-	struct make_unsigned<long>
-	{
-		typedef unsigned long type;
-	};
-
-	template<>
-	struct make_unsigned<int64>
-	{
-		typedef uint64 type;
-	};
-
-	template<>
-	struct make_unsigned<unsigned char>
-	{
-		typedef unsigned char type;
-	};
-
-	template<>
-	struct make_unsigned<unsigned short>
-	{
-		typedef unsigned short type;
-	};
-
-	template<>
-	struct make_unsigned<unsigned int>
-	{
-		typedef unsigned int type;
-	};
-
-	template<>
-	struct make_unsigned<unsigned long>
-	{
-		typedef unsigned long type;
-	};
-
-	template<>
-	struct make_unsigned<uint64>
-	{
-		typedef uint64 type;
-	};
-}//namespace detail
-}//namespace glm
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Only use x, y, z, w as vector type components
-
-#ifdef GLM_FORCE_XYZW_ONLY
-#	define GLM_CONFIG_XYZW_ONLY GLM_ENABLE
-#else
-#	define GLM_CONFIG_XYZW_ONLY GLM_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Configure the use of defaulted initialized types
-
-#define GLM_CTOR_INIT_DISABLE 0
-#define GLM_CTOR_INITIALIZER_LIST 1
-#define GLM_CTOR_INITIALISATION 2
-
-#if defined(GLM_FORCE_CTOR_INIT) && GLM_HAS_INITIALIZER_LISTS
-#	define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALIZER_LIST
-#elif defined(GLM_FORCE_CTOR_INIT) && !GLM_HAS_INITIALIZER_LISTS
-#	define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALISATION
-#else
-#	define GLM_CONFIG_CTOR_INIT GLM_CTOR_INIT_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Use SIMD instruction sets
-
-#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)
-#	define GLM_CONFIG_SIMD GLM_ENABLE
-#else
-#	define GLM_CONFIG_SIMD GLM_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Configure the use of defaulted function
-
-#if GLM_HAS_DEFAULTED_FUNCTIONS && GLM_CONFIG_CTOR_INIT == GLM_CTOR_INIT_DISABLE
-#	define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_ENABLE
-#	define GLM_DEFAULT = default
-#else
-#	define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_DISABLE
-#	define GLM_DEFAULT
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Configure the use of aligned gentypes
-
-#ifdef GLM_FORCE_ALIGNED // Legacy define
-#	define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
-#endif
-
-#ifdef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
-#	define GLM_FORCE_ALIGNED_GENTYPES
-#endif
-
-#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE))
-#	define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE
-#else
-#	define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Configure the use of anonymous structure as implementation detail
-
-#if ((GLM_CONFIG_SIMD == GLM_ENABLE) || (GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR) || (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE))
-#	define GLM_CONFIG_ANONYMOUS_STRUCT GLM_ENABLE
-#else
-#	define GLM_CONFIG_ANONYMOUS_STRUCT GLM_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Silent warnings
-
-#ifdef GLM_FORCE_SILENT_WARNINGS
-#	define GLM_SILENT_WARNINGS GLM_ENABLE
-#else
-#	define GLM_SILENT_WARNINGS GLM_DISABLE
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Precision
-
-#define GLM_HIGHP 1
-#define GLM_MEDIUMP 2
-#define GLM_LOWP 3
-
-#if defined(GLM_FORCE_PRECISION_HIGHP_BOOL) || defined(GLM_PRECISION_HIGHP_BOOL)
-#	define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP
-#elif defined(GLM_FORCE_PRECISION_MEDIUMP_BOOL) || defined(GLM_PRECISION_MEDIUMP_BOOL)
-#	define GLM_CONFIG_PRECISION_BOOL GLM_MEDIUMP
-#elif defined(GLM_FORCE_PRECISION_LOWP_BOOL) || defined(GLM_PRECISION_LOWP_BOOL)
-#	define GLM_CONFIG_PRECISION_BOOL GLM_LOWP
-#else
-#	define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP
-#endif
-
-#if defined(GLM_FORCE_PRECISION_HIGHP_INT) || defined(GLM_PRECISION_HIGHP_INT)
-#	define GLM_CONFIG_PRECISION_INT GLM_HIGHP
-#elif defined(GLM_FORCE_PRECISION_MEDIUMP_INT) || defined(GLM_PRECISION_MEDIUMP_INT)
-#	define GLM_CONFIG_PRECISION_INT GLM_MEDIUMP
-#elif defined(GLM_FORCE_PRECISION_LOWP_INT) || defined(GLM_PRECISION_LOWP_INT)
-#	define GLM_CONFIG_PRECISION_INT GLM_LOWP
-#else
-#	define GLM_CONFIG_PRECISION_INT GLM_HIGHP
-#endif
-
-#if defined(GLM_FORCE_PRECISION_HIGHP_UINT) || defined(GLM_PRECISION_HIGHP_UINT)
-#	define GLM_CONFIG_PRECISION_UINT GLM_HIGHP
-#elif defined(GLM_FORCE_PRECISION_MEDIUMP_UINT) || defined(GLM_PRECISION_MEDIUMP_UINT)
-#	define GLM_CONFIG_PRECISION_UINT GLM_MEDIUMP
-#elif defined(GLM_FORCE_PRECISION_LOWP_UINT) || defined(GLM_PRECISION_LOWP_UINT)
-#	define GLM_CONFIG_PRECISION_UINT GLM_LOWP
-#else
-#	define GLM_CONFIG_PRECISION_UINT GLM_HIGHP
-#endif
-
-#if defined(GLM_FORCE_PRECISION_HIGHP_FLOAT) || defined(GLM_PRECISION_HIGHP_FLOAT)
-#	define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP
-#elif defined(GLM_FORCE_PRECISION_MEDIUMP_FLOAT) || defined(GLM_PRECISION_MEDIUMP_FLOAT)
-#	define GLM_CONFIG_PRECISION_FLOAT GLM_MEDIUMP
-#elif defined(GLM_FORCE_PRECISION_LOWP_FLOAT) || defined(GLM_PRECISION_LOWP_FLOAT)
-#	define GLM_CONFIG_PRECISION_FLOAT GLM_LOWP
-#else
-#	define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP
-#endif
-
-#if defined(GLM_FORCE_PRECISION_HIGHP_DOUBLE) || defined(GLM_PRECISION_HIGHP_DOUBLE)
-#	define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP
-#elif defined(GLM_FORCE_PRECISION_MEDIUMP_DOUBLE) || defined(GLM_PRECISION_MEDIUMP_DOUBLE)
-#	define GLM_CONFIG_PRECISION_DOUBLE GLM_MEDIUMP
-#elif defined(GLM_FORCE_PRECISION_LOWP_DOUBLE) || defined(GLM_PRECISION_LOWP_DOUBLE)
-#	define GLM_CONFIG_PRECISION_DOUBLE GLM_LOWP
-#else
-#	define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP
-#endif
-
-///////////////////////////////////////////////////////////////////////////////////
-// Check inclusions of different versions of GLM
-
-#elif ((GLM_SETUP_INCLUDED != GLM_VERSION) && !defined(GLM_FORCE_IGNORE_VERSION))
-#	error "GLM error: A different version of GLM is already included. Define GLM_FORCE_IGNORE_VERSION before including GLM headers to ignore this error."
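[editor's note: how the deleted clip-control section combines its four bits can be seen in a small standalone mimic; the CLIP_* names below are hypothetical stand-ins for the GLM_CLIP_CONTROL_* macros, not GLM itself:]

// Standalone sketch of the clip-control bit logic, assuming the same
// bit layout as the deleted GLM_CLIP_CONTROL_* defines. Names hypothetical.
#include <cstdio>

#define CLIP_ZO_BIT (1 << 0) // zero-to-one depth (Vulkan, DirectX, Metal)
#define CLIP_NO_BIT (1 << 1) // negative-one-to-one depth (OpenGL)
#define CLIP_LH_BIT (1 << 2) // left handed
#define CLIP_RH_BIT (1 << 3) // right handed

int main()
{
	// GLM's default configuration is right handed, -1..1 depth.
	int config = CLIP_RH_BIT | CLIP_NO_BIT;
	if(config & CLIP_ZO_BIT)
		std::printf("depth clip space 0..1\n");
	else
		std::printf("depth clip space -1..1\n"); // printed here
	return 0;
}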
-#elif GLM_SETUP_INCLUDED == GLM_VERSION
-
-///////////////////////////////////////////////////////////////////////////////////
-// Messages
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_DISPLAYED)
-#	define GLM_MESSAGE_DISPLAYED
-#	define GLM_STR_HELPER(x) #x
-#	define GLM_STR(x) GLM_STR_HELPER(x)
-
-	// Report GLM version
-#	pragma message (GLM_STR(GLM_VERSION_MESSAGE))
-
-	// Report C++ language
-#	if (GLM_LANG & GLM_LANG_CXX2A_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 2A with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX2A_FLAG)
-#		pragma message("GLM: C++ 2A")
-#	elif (GLM_LANG & GLM_LANG_CXX17_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 17 with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX17_FLAG)
-#		pragma message("GLM: C++ 17")
-#	elif (GLM_LANG & GLM_LANG_CXX14_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 14 with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX14_FLAG)
-#		pragma message("GLM: C++ 14")
-#	elif (GLM_LANG & GLM_LANG_CXX11_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 11 with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX11_FLAG)
-#		pragma message("GLM: C++ 11")
-#	elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 0x with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX0X_FLAG)
-#		pragma message("GLM: C++ 0x")
-#	elif (GLM_LANG & GLM_LANG_CXX03_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 03 with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX03_FLAG)
-#		pragma message("GLM: C++ 03")
-#	elif (GLM_LANG & GLM_LANG_CXX98_FLAG) && (GLM_LANG & GLM_LANG_EXT)
-#		pragma message("GLM: C++ 98 with extensions")
-#	elif (GLM_LANG & GLM_LANG_CXX98_FLAG)
-#		pragma message("GLM: C++ 98")
-#	else
-#		pragma message("GLM: C++ language undetected")
-#	endif//GLM_LANG
-
-	// Report compiler detection
-#	if GLM_COMPILER & GLM_COMPILER_CUDA
-#		pragma message("GLM: CUDA compiler detected")
-#	elif GLM_COMPILER & GLM_COMPILER_VC
-#		pragma message("GLM: Visual C++ compiler detected")
-#	elif GLM_COMPILER & GLM_COMPILER_CLANG
-#		pragma message("GLM: Clang compiler detected")
-#	elif GLM_COMPILER & GLM_COMPILER_INTEL
-#		pragma message("GLM: Intel Compiler detected")
-#	elif GLM_COMPILER & GLM_COMPILER_GCC
-#		pragma message("GLM: GCC compiler detected")
-#	else
-#		pragma message("GLM: Compiler not detected")
-#	endif
-
-	// Report build target
-#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with AVX2 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with AVX2 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with AVX instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with AVX instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with SSE4.2 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with SSE4.2 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with SSE4.1 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with SSE4.1 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with SSSE3 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with SSSE3 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with SSE3 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with SSE3 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits with SSE2 instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits with SSE2 instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: x86 64 bits build target")
-#	elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: x86 32 bits build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: ARM 64 bits with Neon instruction set build target")
-#	elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: ARM 32 bits with Neon instruction set build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: ARM 64 bits build target")
-#	elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: ARM 32 bits build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: MIPS 64 bits build target")
-#	elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: MIPS 32 bits build target")
-
-#	elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_64)
-#		pragma message("GLM: PowerPC 64 bits build target")
-#	elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_32)
-#		pragma message("GLM: PowerPC 32 bits build target")
-#	else
-#		pragma message("GLM: Unknown build target")
-#	endif//GLM_ARCH
-
-	// Report platform name
-#	if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO)
-#		pragma message("GLM: QNX platform detected")
-//#	elif(GLM_PLATFORM & GLM_PLATFORM_IOS)
-//#		pragma message("GLM: iOS platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_APPLE)
-#		pragma message("GLM: Apple platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_WINCE)
-#		pragma message("GLM: WinCE platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS)
-#		pragma message("GLM: Windows platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL)
-#		pragma message("GLM: Native Client detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-#		pragma message("GLM: Android platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_LINUX)
-#		pragma message("GLM: Linux platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_UNIX)
-#		pragma message("GLM: UNIX platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN)
-#		pragma message("GLM: platform unknown")
-#	else
-#		pragma message("GLM: platform not detected")
-#	endif
-
-	// Report whether only xyzw component are used
-#	if defined GLM_FORCE_XYZW_ONLY
-#		pragma message("GLM: GLM_FORCE_XYZW_ONLY is defined. Only x, y, z and w component are available in vector type. This define disables swizzle operators and SIMD instruction sets.")
-#	endif
-
-	// Report swizzle operator support
-#	if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
-#		pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling operators enabled.")
-#	elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION
-#		pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling functions enabled. Enable compiler C++ language extensions to enable swizzle operators.")
-#	else
-#		pragma message("GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.")
-#	endif
-
-	// Report .length() type
-#	if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T
-#		pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is defined. .length() returns a glm::length_t, a typedef of std::size_t.")
-#	else
-#		pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.")
-#	endif
-
-#	if GLM_CONFIG_UNRESTRICTED_GENTYPE == GLM_ENABLE
-#		pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is defined. Removes GLSL restrictions on valid function genTypes.")
-#	else
-#		pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.")
-#	endif
-
-#	if GLM_SILENT_WARNINGS == GLM_ENABLE
-#		pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is defined. Ignores C++ warnings from using C++ language extensions.")
-#	else
-#		pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is undefined. Shows C++ warnings from using C++ language extensions.")
-#	endif
-
-#	ifdef GLM_FORCE_SINGLE_ONLY
-#		pragma message("GLM: GLM_FORCE_SINGLE_ONLY is defined. Using only single precision floating-point types.")
-#	endif
-
-#	if defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE)
-#		undef GLM_FORCE_ALIGNED_GENTYPES
-#		pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined, allowing aligned types. This prevents the use of C++ constexpr.")
-#	elif defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE)
-#		undef GLM_FORCE_ALIGNED_GENTYPES
-#		pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.")
-#	endif
-
-#	if defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES)
-#		if GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE
-#			undef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
-#			pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.")
-#		elif GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
-#			pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined. All gentypes (e.g. vec3) will be aligned and padded by default.")
-#		endif
-#	endif
-
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-#		pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is defined. Using zero to one depth clip space.")
-#	else
-#		pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.")
-#	endif
-
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-#		pragma message("GLM: GLM_FORCE_LEFT_HANDED is defined. Using left handed coordinate system.")
-#	else
-#		pragma message("GLM: GLM_FORCE_LEFT_HANDED is undefined. Using right handed coordinate system.")
-#	endif
-#endif//GLM_MESSAGES
-
-#endif//GLM_SETUP_INCLUDED
diff --git a/third_party/glm/detail/type_float.hpp b/third_party/glm/detail/type_float.hpp
deleted file mode 100755
index c8037eb..0000000
--- a/third_party/glm/detail/type_float.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-#pragma once
-
-#include "setup.hpp"
-
-#if GLM_COMPILER == GLM_COMPILER_VC12
-#	pragma warning(push)
-#	pragma warning(disable: 4512) // assignment operator could not be generated
-#endif
-
-namespace glm{
-namespace detail
-{
-	template <typename T>
-	union float_t
-	{};
-
-	// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
-	template <>
-	union float_t<float>
-	{
-		typedef int int_type;
-		typedef float float_type;
-
-		GLM_CONSTEXPR float_t(float_type Num = 0.0f) : f(Num) {}
-
-		GLM_CONSTEXPR float_t& operator=(float_t const& x)
-		{
-			f = x.f;
-			return *this;
-		}
-
-		// Portable extraction of components.
-		GLM_CONSTEXPR bool negative() const { return i < 0; }
-		GLM_CONSTEXPR int_type mantissa() const { return i & ((1 << 23) - 1); }
-		GLM_CONSTEXPR int_type exponent() const { return (i >> 23) & ((1 << 8) - 1); }
-
-		int_type i;
-		float_type f;
-	};
-
-	template <>
-	union float_t<double>
-	{
-		typedef detail::int64 int_type;
-		typedef double float_type;
-
-		GLM_CONSTEXPR float_t(float_type Num = static_cast<float_type>(0)) : f(Num) {}
-
-		GLM_CONSTEXPR float_t& operator=(float_t const& x)
-		{
-			f = x.f;
-			return *this;
-		}
-
-		// Portable extraction of components.
-		GLM_CONSTEXPR bool negative() const { return i < 0; }
-		GLM_CONSTEXPR int_type mantissa() const { return i & ((int_type(1) << 52) - 1); }
-		GLM_CONSTEXPR int_type exponent() const { return (i >> 52) & ((int_type(1) << 11) - 1); }
-
-		int_type i;
-		float_type f;
-	};
-}//namespace detail
-}//namespace glm
-
-#if GLM_COMPILER == GLM_COMPILER_VC12
-#	pragma warning(pop)
-#endif
diff --git a/third_party/glm/detail/type_half.hpp b/third_party/glm/detail/type_half.hpp
deleted file mode 100755
index 40b8bec..0000000
--- a/third_party/glm/detail/type_half.hpp
+++ /dev/null
@@ -1,16 +0,0 @@
-#pragma once
-
-#include "setup.hpp"
-
-namespace glm{
-namespace detail
-{
-	typedef short hdata;
-
-	GLM_FUNC_DECL float toFloat32(hdata value);
-	GLM_FUNC_DECL hdata toFloat16(float const& value);
-
-}//namespace detail
-}//namespace glm
-
-#include "type_half.inl"
diff --git a/third_party/glm/detail/type_half.inl b/third_party/glm/detail/type_half.inl
deleted file mode 100755
index b0723e3..0000000
--- a/third_party/glm/detail/type_half.inl
+++ /dev/null
@@ -1,241 +0,0 @@
-namespace glm{
-namespace detail
-{
-	GLM_FUNC_QUALIFIER float overflow()
-	{
-		volatile float f = 1e10;
-
-		for(int i = 0; i < 10; ++i)
-			f *= f; // this will overflow before the for loop terminates
-		return f;
-	}
-
-	union uif32
-	{
-		GLM_FUNC_QUALIFIER uif32() :
-			i(0)
-		{}
-
-		GLM_FUNC_QUALIFIER uif32(float f_) :
-			f(f_)
-		{}
-
-		GLM_FUNC_QUALIFIER uif32(unsigned int i_) :
-			i(i_)
-		{}
-
-		float f;
-		unsigned int i;
-	};
-
-	GLM_FUNC_QUALIFIER float toFloat32(hdata value)
-	{
-		int s = (value >> 15) & 0x00000001;
-		int e = (value >> 10) & 0x0000001f;
-		int m = value & 0x000003ff;
-
-		if(e == 0)
-		{
-			if(m == 0)
-			{
-				//
-				// Plus or minus zero
-				//
-
-				detail::uif32 result;
-				result.i = static_cast<unsigned int>(s << 31);
-				return result.f;
-			}
-			else
-			{
-				//
-				// Denormalized number -- renormalize it
-				//
-
-				while(!(m & 0x00000400))
-				{
-					m <<= 1;
-					e -= 1;
-				}
-
-				e += 1;
-				m &= ~0x00000400;
-			}
-		}
-		else if(e == 31)
-		{
-			if(m == 0)
-			{
-				//
-				// Positive or negative infinity
-				//
-
-				uif32 result;
-				result.i = static_cast<unsigned int>((s << 31) | 0x7f800000);
-				return result.f;
-			}
-			else
-			{
-				//
-				// Nan -- preserve sign and significand bits
-				//
-
-				uif32 result;
-				result.i = static_cast<unsigned int>((s << 31) | 0x7f800000 | (m << 13));
-				return result.f;
-			}
-		}
-
-		//
-		// Normalized number
-		//
-
-		e = e + (127 - 15);
-		m = m << 13;
-
-		//
-		// Assemble s, e and m.
-		//
-
-		uif32 Result;
-		Result.i = static_cast<unsigned int>((s << 31) | (e << 23) | m);
-		return Result.f;
-	}
-
-	GLM_FUNC_QUALIFIER hdata toFloat16(float const& f)
-	{
-		uif32 Entry;
-		Entry.f = f;
-		int i = static_cast<int>(Entry.i);
-
-		//
-		// Our floating point number, f, is represented by the bit
-		// pattern in integer i. Disassemble that bit pattern into
-		// the sign, s, the exponent, e, and the significand, m.
-		// Shift s into the position where it will go in the
-		// resulting half number.
-		// Adjust e, accounting for the different exponent bias
-		// of float and half (127 versus 15).
-		//
-
-		int s = (i >> 16) & 0x00008000;
-		int e = ((i >> 23) & 0x000000ff) - (127 - 15);
-		int m = i & 0x007fffff;
-
-		//
-		// Now reassemble s, e and m into a half:
-		//
-
-		if(e <= 0)
-		{
-			if(e < -10)
-			{
-				//
-				// E is less than -10. The absolute value of f is
-				// less than half_MIN (f may be a small normalized
-				// float, a denormalized float or a zero).
-				//
-				// We convert f to a half zero.
-				//
-
-				return hdata(s);
-			}
-
-			//
-			// E is between -10 and 0. F is a normalized float,
-			// whose magnitude is less than __half_NRM_MIN.
-			//
-			// We convert f to a denormalized half.
-			//
-
-			m = (m | 0x00800000) >> (1 - e);
-
-			//
-			// Round to nearest, round "0.5" up.
-			//
-			// Rounding may cause the significand to overflow and make
-			// our number normalized. Because of the way a half's bits
-			// are laid out, we don't have to treat this case separately;
-			// the code below will handle it correctly.
-			//
-
-			if(m & 0x00001000)
-				m += 0x00002000;
-
-			//
-			// Assemble the half from s, e (zero) and m.
-			//
-
-			return hdata(s | (m >> 13));
-		}
-		else if(e == 0xff - (127 - 15))
-		{
-			if(m == 0)
-			{
-				//
-				// F is an infinity; convert f to a half
-				// infinity with the same sign as f.
-				//
-
-				return hdata(s | 0x7c00);
-			}
-			else
-			{
-				//
-				// F is a NAN; we produce a half NAN that preserves
-				// the sign bit and the 10 leftmost bits of the
-				// significand of f, with one exception: If the 10
-				// leftmost bits are all zero, the NAN would turn
-				// into an infinity, so we have to set at least one
-				// bit in the significand.
-				//
-
-				m >>= 13;
-
-				return hdata(s | 0x7c00 | m | (m == 0));
-			}
-		}
-		else
-		{
-			//
-			// E is greater than zero. F is a normalized float.
-			// We try to convert f to a normalized half.
-			//
-
-			//
-			// Round to nearest, round "0.5" up
-			//
-
-			if(m & 0x00001000)
-			{
-				m += 0x00002000;
-
-				if(m & 0x00800000)
-				{
-					m = 0; // overflow in significand,
-					e += 1; // adjust exponent
-				}
-			}
-
-			//
-			// Handle exponent overflow
-			//
-
-			if (e > 30)
-			{
-				overflow(); // Cause a hardware floating point overflow;
-
-				return hdata(s | 0x7c00);
-				// if this returns, the half becomes an
-			} // infinity with the same sign as f.
-
-			//
-			// Assemble the half from s, e and m.
-			//
-
-			return hdata(s | (e << 10) | (m >> 13));
-		}
-	}
-
-}//namespace detail
-}//namespace glm
diff --git a/third_party/glm/detail/type_mat2x2.hpp b/third_party/glm/detail/type_mat2x2.hpp
deleted file mode 100755
index 033908f..0000000
--- a/third_party/glm/detail/type_mat2x2.hpp
+++ /dev/null
@@ -1,177 +0,0 @@
-/// @ref core
-/// @file glm/detail/type_mat2x2.hpp
-
-#pragma once
-
-#include "type_vec2.hpp"
-#include <limits>
-#include <cstddef>
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	struct mat<2, 2, T, Q>
-	{
-		typedef vec<2, T, Q> col_type;
-		typedef vec<2, T, Q> row_type;
-		typedef mat<2, 2, T, Q> type;
-		typedef mat<2, 2, T, Q> transpose_type;
-		typedef T value_type;
-
-	private:
-		col_type value[2];
-
-	public:
-		// -- Accesses --
-
-		typedef length_t length_type;
-		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; }
-
-		GLM_FUNC_DECL col_type & operator[](length_type i);
-		GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const;
-
-		// -- Constructors --
-
-		GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT;
-		template<qualifier P>
-		GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 2, T, P> const& m);
-
-		GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar);
-		GLM_FUNC_DECL GLM_CONSTEXPR mat(
-			T const& x1, T const& y1,
-			T const& x2, T const& y2);
-		GLM_FUNC_DECL GLM_CONSTEXPR mat(
-			col_type const& v1,
-			col_type const& v2);
-
-		// -- Conversions --
-
-		template<typename U, typename V, typename M, typename N>
-		GLM_FUNC_DECL GLM_CONSTEXPR mat(
-			U const& x1, V const& y1,
-			M const& x2, N const& y2);
-
-		template<typename U, typename V>
-		GLM_FUNC_DECL GLM_CONSTEXPR mat(
-			vec<2, U, Q> const& v1,
-			vec<2, V, Q> const& v2);
-
-		// -- Matrix conversions --
-
-		template<typename U, qualifier P>
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, U, P> const& m);
-
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x);
-		GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x);
-
-		// -- Unary arithmetic operators --
-
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(U s);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(U s);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(U s);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(U s);
-		template<typename U>
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m);
-
-		// -- Increment and decrement operators --
-
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator++ ();
-		GLM_FUNC_DECL mat<2, 2, T, Q> & operator-- ();
-		GLM_FUNC_DECL mat<2, 2, T, Q> operator++(int);
-		GLM_FUNC_DECL mat<2, 2, T, Q> operator--(int);
-	};
-
-	// -- Unary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m);
-
-	// -- Binary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar);
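[editor's note: the normalized branch of the deleted toFloat32 boils down to re-biasing the 5-bit half exponent (bias 15) to the 8-bit float exponent (bias 127) and widening the 10-bit mantissa to 23 bits. A self-contained sketch of that branch only, assuming IEEE 754 binary16/binary32 layouts; the function name is hypothetical:]

// Decode a normalized IEEE 754 half into a float, mirroring the
// sign/exponent/mantissa re-assembly of the deleted toFloat32.
#include <cstdio>
#include <cstring>

static float half_to_float_normal(unsigned short h)
{
	unsigned int s = (h >> 15) & 0x1;   // sign
	unsigned int e = (h >> 10) & 0x1f;  // exponent, biased by 15
	unsigned int m = h & 0x3ff;         // 10 mantissa bits
	// Re-bias exponent to 127, widen mantissa to 23 bits, reassemble.
	unsigned int bits = (s << 31) | ((e + (127 - 15)) << 23) | (m << 13);
	float f;
	std::memcpy(&f, &bits, sizeof f);   // well-defined type punning
	return f;
}

int main()
{
	std::printf("%f\n", half_to_float_normal(0x3C00)); // prints 1.000000
	std::printf("%f\n", half_to_float_normal(0xC000)); // prints -2.000000
	return 0;
}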
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator*(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator*(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	// -- Boolean operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-} //namespace glm
-
-#ifndef GLM_EXTERNAL_TEMPLATE
-#include "type_mat2x2.inl"
-#endif
diff --git a/third_party/glm/detail/type_mat2x2.inl b/third_party/glm/detail/type_mat2x2.inl
deleted file mode 100755
index fe5d1aa..0000000
--- a/third_party/glm/detail/type_mat2x2.inl
+++ /dev/null
@@ -1,536 +0,0 @@
-#include "../matrix.hpp"
-
-namespace glm
-{
-	// -- Constructors --
-
-#	if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
-		template<typename T, qualifier Q>
-		GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat()
-#			if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
-				: value{col_type(1, 0), col_type(0, 1)}
-#			endif
-		{
-#			if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
-				this->value[0] = col_type(1, 0);
-				this->value[1] = col_type(0, 1);
-#			endif
-		}
-#	endif
-
-	template<typename T, qualifier Q>
-	template<qualifier P>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, T, P> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{m[0], m[1]}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = m[0];
-			this->value[1] = m[1];
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(T scalar)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(scalar, 0), col_type(0, scalar)}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(scalar, 0);
-			this->value[1] = col_type(0, scalar);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat
-	(
-		T const& x0, T const& y0,
-		T const& x1, T const& y1
-	)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(x0, y0), col_type(x1, y1)}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(x0, y0);
-			this->value[1] = col_type(x1, y1);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(col_type const& v0, col_type const& v1)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{v0, v1}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = v0;
-			this->value[1] = v1;
-#		endif
-	}
-
-	// -- Conversion constructors --
-
-	template<typename T, qualifier Q>
-	template<typename X1, typename Y1, typename X2, typename Y2>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat
-	(
-		X1 const& x1, Y1 const& y1,
-		X2 const& x2, Y2 const& y2
-	)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(static_cast<T>(x1), value_type(y1)), col_type(static_cast<T>(x2), value_type(y2)) }
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(static_cast<T>(x1), value_type(y1));
-			this->value[1] = col_type(static_cast<T>(x2), value_type(y2));
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	template<typename V1, typename V2>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(v1), col_type(v2)}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(v1);
-			this->value[1] = col_type(v2);
-#		endif
-	}
-
-	// -- mat2x2 matrix conversions --
-
-	template<typename T, qualifier Q>
-	template<typename U, qualifier P>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, U, P> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 3, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 4, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 3, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 2, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 4, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 2, T, Q> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(m[0]), col_type(m[1])}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(m[0]);
-			this->value[1] = col_type(m[1]);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 4, T, Q> const& m)
-# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type const& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator=(mat<2, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(U scalar) - { - this->value[0] += scalar; - this->value[1] += scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(mat<2, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(U scalar) - { - this->value[0] -= scalar; - this->value[1] -= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(mat<2, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(U scalar) - { - this->value[0] *= scalar; - this->value[1] *= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(mat<2, 2, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(U scalar) - { - this->value[0] /= scalar; - this->value[1] /= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(mat<2, 2, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator++(int) - { - mat<2, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator--(int) - { - mat<2, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic 
operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - scalar - m[0], - scalar - m[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type operator* - ( - mat<2, 2, T, Q> const& m, - typename mat<2, 2, T, Q>::row_type const& v - ) - { - return vec<2, T, Q>( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator* - ( - typename mat<2, 2, T, Q>::col_type const& v, - mat<2, 2, T, Q> const& m - ) - { - return vec<2, T, Q>( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type 
operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v) - { - return inverse(m) * v; - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m) - { - return v * inverse(m); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - mat<2, 2, T, Q> m1_copy(m1); - return m1_copy /= m2; - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat2x3.hpp b/third_party/glm/detail/type_mat2x3.hpp deleted file mode 100755 index d6596e4..0000000 --- a/third_party/glm/detail/type_mat2x3.hpp +++ /dev/null @@ -1,159 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat2x3.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<2, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<2, T, Q> row_type; - typedef mat<2, 3, T, Q> type; - typedef mat<3, 2, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[2]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, - T x1, T y1, T z1); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1); - - // -- Conversions -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, U, Q> const& v1, - vec<3, V, Q> const& v2); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & 
operator/=(U s);
-
-		// -- Increment and decrement operators --
-
-		GLM_FUNC_DECL mat<2, 3, T, Q> & operator++ ();
-		GLM_FUNC_DECL mat<2, 3, T, Q> & operator-- ();
-		GLM_FUNC_DECL mat<2, 3, T, Q> operator++(int);
-		GLM_FUNC_DECL mat<2, 3, T, Q> operator--(int);
-	};
-
-	// -- Unary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m);
-
-	// -- Binary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 3, T, Q>::col_type operator*(mat<2, 3, T, Q> const& m, typename mat<2, 3, T, Q>::row_type const& v);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<2, 3, T, Q>::row_type operator*(typename mat<2, 3, T, Q>::col_type const& v, mat<2, 3, T, Q> const& m);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m);
-
-	// -- Boolean operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2);
-}//namespace glm
-
-#ifndef GLM_EXTERNAL_TEMPLATE
-#include "type_mat2x3.inl"
-#endif
diff --git a/third_party/glm/detail/type_mat2x3.inl b/third_party/glm/detail/type_mat2x3.inl
deleted file mode 100755
index 5fec17e..0000000
--- a/third_party/glm/detail/type_mat2x3.inl
+++ /dev/null
@@ -1,510 +0,0 @@
-namespace glm
-{
-	// -- Constructors --
-
-#	if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat()
-#		if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST
-			: value{col_type(1, 0, 0), col_type(0, 1, 0)}
-#		endif
-	{
-#		if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION
-			this->value[0] = col_type(1, 0, 0);
-			this->value[1] = col_type(0, 1, 0);
-#		endif
-	}
-#	endif
-
-	template<typename T, qualifier Q>
-	template<qualifier P>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, T, P> const& m)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{m.value[0], m.value[1]}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = m.value[0];
-			this->value[1] = m.value[1];
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(T scalar)
-#		if GLM_HAS_INITIALIZER_LISTS
-			: value{col_type(scalar, 0, 0), col_type(0, scalar, 0)}
-#		endif
-	{
-#		if !GLM_HAS_INITIALIZER_LISTS
-			this->value[0] = col_type(scalar, 0, 0);
-			this->value[1] = col_type(0, scalar, 0);
-#		endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat
-	(
-		T x0, T y0, T z0,
- T x1, T y1, T z1 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(col_type const& v0, col_type const& v1) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1), col_type(x2, y2, z2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1); - this->value[1] = col_type(x2, y2, z2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type & mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type const& mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator=(mat<2, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator+=(mat<2, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(mat<2, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator++(int) - { - mat<2, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator--(int) - { - mat<2, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - 
return mat<2, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type operator* - ( - mat<2, 3, T, Q> const& m, - typename mat<2, 3, T, Q>::row_type const& v) - { - return typename mat<2, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y, - m[0][2] * v.x + m[1][2] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::row_type operator* - ( - typename mat<2, 3, T, Q>::col_type const& v, - mat<2, 3, T, Q> const& m) - { - return typename mat<2, 3, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - T SrcA00 = m1[0][0]; - T SrcA01 = m1[0][1]; - T SrcA02 = m1[0][2]; - T SrcA10 = m1[1][0]; - T SrcA11 = m1[1][1]; - T SrcA12 = m1[1][2]; - - T SrcB00 = m2[0][0]; - T SrcB01 = m2[0][1]; - T SrcB10 = m2[1][0]; - T SrcB11 = m2[1][1]; - T SrcB20 = m2[2][0]; - T SrcB21 = m2[2][1]; - - mat<3, 3, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - 
template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat2x4.hpp b/third_party/glm/detail/type_mat2x4.hpp deleted file mode 100755 index ff03e21..0000000 --- a/third_party/glm/detail/type_mat2x4.hpp +++ /dev/null @@ -1,161 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat2x4.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<2, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<2, T, Q> row_type; - typedef mat<2, 4, T, Q> type; - typedef mat<4, 2, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[2]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, U, Q> const& v1, - vec<4, V, Q> const& v2); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<2, 4, T, Q> & operator++ (); - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-- (); - GLM_FUNC_DECL 
mat<2, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<2, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat2x4.inl" -#endif diff --git a/third_party/glm/detail/type_mat2x4.inl b/third_party/glm/detail/type_mat2x4.inl deleted file mode 100755 index b6d2b9d..0000000 --- a/third_party/glm/detail/type_mat2x4.inl +++ /dev/null @@ -1,520 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{m[0], m[1]} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat - ( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0, w0), col_type(x1, y1, z1, w1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = 
col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(col_type const& v0, col_type const& v1) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1, w1); - this->value[1] = col_type(x2, y2, z2, w2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if 
GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type & mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type const& mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator=(mat<2, 4, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(mat<2, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(mat<2, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> & mat<2, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator++(int) - { - mat<2, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator--(int) - { - mat<2, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0] + m2[0], - 
m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v) - { - return typename mat<2, 4, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y, - m[0][2] * v.x + m[1][2] * v.y, - m[0][3] * v.x + m[1][3] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m) - { - return typename mat<2, 4, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - T SrcA00 = m1[0][0]; - T SrcA01 = m1[0][1]; - T SrcA02 = m1[0][2]; - T SrcA03 = m1[0][3]; - T SrcA10 = m1[1][0]; - T SrcA11 = m1[1][1]; - T SrcA12 = m1[1][2]; - T SrcA13 = m1[1][3]; - - T SrcB00 = m2[0][0]; - T SrcB01 = m2[0][1]; - T SrcB10 = m2[1][0]; - T SrcB11 = m2[1][1]; - T SrcB20 = m2[2][0]; - T SrcB21 = m2[2][1]; - T SrcB30 = m2[3][0]; - T SrcB31 = m2[3][1]; - - mat<4, 4, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; - Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; - Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; - Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21; - Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31; - Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31; - Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31; - Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][3] * m2[0][0] + 
m1[1][3] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x2.hpp b/third_party/glm/detail/type_mat3x2.hpp deleted file mode 100755 index e166581..0000000 --- a/third_party/glm/detail/type_mat3x2.hpp +++ /dev/null @@ -1,167 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x2.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 2, T, Q> - { - typedef vec<2, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 2, T, Q> type; - typedef mat<2, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 2, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, - T x1, T y1, - T x2, T y2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<2, V1, Q> const& v1, - vec<2, V2, Q> const& v2, - vec<2, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator=(mat<3, 2, U, Q> const& m); - template - 
GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(mat<3, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(mat<3, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 2, T, Q> & operator++ (); - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-- (); - GLM_FUNC_DECL mat<3, 2, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 2, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x2.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x2.inl b/third_party/glm/detail/type_mat3x2.inl deleted file mode 100755 index b4b948b..0000000 --- a/third_party/glm/detail/type_mat3x2.inl +++ /dev/null @@ -1,532 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0), col_type(0, 1), col_type(0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0); - this->value[1] = col_type(0, 1); - this->value[2] = col_type(0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - 
this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0), col_type(0, s), col_type(0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0); - this->value[1] = col_type(0, s); - this->value[2] = col_type(0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat - ( - T x0, T y0, - T x1, T y1, - T x2, T y2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat - ( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if 
GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type & mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type const& mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator=(mat<3, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(mat<3, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(mat<3, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> & mat<3, 2, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - 
template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator++(int) - { - mat<3, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator--(int) - { - mat<3, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v) - { - return typename mat<3, 2, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m) - { - return typename mat<3, 2, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1], - v.x * m[2][0] + v.y * m[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - const T SrcA00 = m1[0][0]; - const T SrcA01 = m1[0][1]; - const T SrcA10 = m1[1][0]; - const T SrcA11 = m1[1][1]; - const T SrcA20 = m1[2][0]; - const T SrcA21 = m1[2][1]; - - const T SrcB00 = m2[0][0]; - const T SrcB01 = m2[0][1]; - const T SrcB02 = m2[0][2]; - const T SrcB10 = m2[1][0]; - const T SrcB11 = m2[1][1]; - const T SrcB12 = m2[1][2]; - - mat<2, 2, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> 
operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x3.hpp b/third_party/glm/detail/type_mat3x3.hpp deleted file mode 100755 index 3174872..0000000 --- a/third_party/glm/detail/type_mat3x3.hpp +++ /dev/null @@ -1,184 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x3.hpp - -#pragma once - -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 3, T, Q> type; - typedef mat<3, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, - T x1, T y1, T z1, - T x2, T y2, T z2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2, - X3 x3, Y3 y3, Z3 z3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, V1, 
Q> const& v1, - vec<3, V2, Q> const& v2, - vec<3, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(mat<3, 3, U, Q> const& m); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 3, T, Q> & operator++(); - GLM_FUNC_DECL mat<3, 3, T, Q> & operator--(); - GLM_FUNC_DECL mat<3, 3, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 3, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m); - - template - 
GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x3.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x3.inl b/third_party/glm/detail/type_mat3x3.inl deleted file mode 100755 index 1ddaf99..0000000 --- a/third_party/glm/detail/type_mat3x3.inl +++ /dev/null @@ -1,601 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0); - this->value[1] = col_type(0, 1, 0); - this->value[2] = col_type(0, 0, 1); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0); - this->value[1] = col_type(0, s, 0); - this->value[2] = col_type(0, 0, s); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat - ( - T x0, T y0, T z0, - T x1, T y1, T z1, - T x2, T y2, T z2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2, - X3 x3, Y3 y3, Z3 z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1); - this->value[1] = col_type(x2, y2, z2); - this->value[2] = col_type(x3, y3, z3); 
-# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - 
this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type & mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type const& mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator=(mat<3, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(mat<3, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(mat<3, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(mat<3, 3, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(mat<3, 3, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator++(int) - { - mat<3, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator--(int) - { - mat<3, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - 
GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - scalar - m[0], - scalar - m[1], - scalar - m[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) - { - return typename mat<3, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) - { - return typename mat<3, 3, T, Q>::row_type( - m[0][0] * v.x + m[0][1] * v.y + m[0][2] * v.z, - m[1][0] * v.x + m[1][1] * v.y + m[1][2] * v.z, - m[2][0] * v.x + m[2][1] * v.y + m[2][2] * v.z); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA02 = m1[0][2]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA12 = m1[1][2]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA22 = m1[2][2]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB20 = m2[2][0]; - T const SrcB21 = m2[2][1]; - T const SrcB22 = m2[2][2]; - - mat<3, 3, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + 
m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) - { - return inverse(m) * v; - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) - { - return v * inverse(m); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - mat<3, 3, T, Q> m1_copy(m1); - return m1_copy /= m2; - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x4.hpp b/third_party/glm/detail/type_mat3x4.hpp deleted file mode 100755 index 6e40b90..0000000 --- a/third_party/glm/detail/type_mat3x4.hpp +++ /dev/null @@ -1,166 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x4.hpp - -#pragma once - -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 4, T, Q> type; - typedef mat<4, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() 
GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1, - T x2, T y2, T z2, T w2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2, - X3 x3, Y3 y3, Z3 z3, W3 w3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, V1, Q> const& v1, - vec<4, V2, Q> const& v2, - vec<4, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 4, T, Q> & operator++(); - GLM_FUNC_DECL mat<3, 4, T, Q> & operator--(); - GLM_FUNC_DECL mat<3, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 4, T, Q>::col_type operator*(mat<3, 4, T, Q> const& m, typename mat<3, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 4, T, Q>::row_type operator*(typename mat<3, 4, T, Q>::col_type const& v, mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL 
mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x4.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x4.inl b/third_party/glm/detail/type_mat3x4.inl deleted file mode 100755 index 6ee416c..0000000 --- a/third_party/glm/detail/type_mat3x4.inl +++ /dev/null @@ -1,578 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); - this->value[2] = col_type(0, 0, s, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat - ( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1, - T x2, T y2, T z2, T w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, typename Z0, typename W0, - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat - ( - X0 x0, Y0 y0, Z0 z0, W0 w0, - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = 
col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(vec<4, V1, Q> const& v0, vec<4, V2, Q> const& v1, vec<4, V3, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 
3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type & mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type const& mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator=(mat<3, 4, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(mat<3, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(mat<3, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> & mat<3, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator++(int) - { - mat<3, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator--(int) - { - mat<3, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0] + m2[0], - m1[1] + 
m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type operator* - ( - mat<3, 4, T, Q> const& m, - typename mat<3, 4, T, Q>::row_type const& v - ) - { - return typename mat<3, 4, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z, - m[0][3] * v.x + m[1][3] * v.y + m[2][3] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::row_type operator* - ( - typename mat<3, 4, T, Q>::col_type const& v, - mat<3, 4, T, Q> const& m - ) - { - return typename mat<3, 4, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3], - v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2] + v.w * m[2][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - const T SrcA00 = m1[0][0]; - const T SrcA01 = m1[0][1]; - const T SrcA02 = m1[0][2]; - const T SrcA03 = m1[0][3]; - const T SrcA10 = m1[1][0]; - const T SrcA11 = m1[1][1]; - const T SrcA12 = m1[1][2]; - const T SrcA13 = m1[1][3]; - const T SrcA20 = m1[2][0]; - const T SrcA21 = m1[2][1]; - const T SrcA22 = m1[2][2]; - const T SrcA23 = m1[2][3]; - - const T SrcB00 = m2[0][0]; - const T SrcB01 = m2[0][1]; - const T SrcB02 = m2[0][2]; - const T SrcB10 = m2[1][0]; - const T SrcB11 = m2[1][1]; - const T SrcB12 = m2[1][2]; - const T SrcB20 = m2[2][0]; - const T SrcB21 = m2[2][1]; - const T SrcB22 = m2[2][2]; - const T SrcB30 = m2[3][0]; - const T SrcB31 = m2[3][1]; - const T SrcB32 = m2[3][2]; - - mat<4, 4, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; - Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01 + SrcA23 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; - Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11 + SrcA23 * SrcB12; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; - Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21 + SrcA23 * SrcB22; - Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31 + SrcA20 * SrcB32; - Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31 + SrcA21 * SrcB32; - Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31 + SrcA22 * SrcB32; - Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31 + SrcA23 * 
SrcB32; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x2.hpp b/third_party/glm/detail/type_mat4x2.hpp deleted file mode 100755 index 8d34352..0000000 --- a/third_party/glm/detail/type_mat4x2.hpp +++ /dev/null @@ -1,171 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x2.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 2, T, Q> - { - typedef vec<2, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 2, T, Q> type; - typedef mat<2, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 2, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL 
GLM_CONSTEXPR mat( - T x0, T y0, - T x1, T y1, - T x2, T y2, - T x3, T y3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<2, V1, Q> const& v1, - vec<2, V2, Q> const& v2, - vec<2, V3, Q> const& v3, - vec<2, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 2, T, Q> & operator++ (); - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-- (); - GLM_FUNC_DECL mat<4, 2, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 2, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template 
- GLM_FUNC_DECL mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat4x2.inl" -#endif diff --git a/third_party/glm/detail/type_mat4x2.inl b/third_party/glm/detail/type_mat4x2.inl deleted file mode 100755 index 419c80c..0000000 --- a/third_party/glm/detail/type_mat4x2.inl +++ /dev/null @@ -1,574 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0), col_type(0, 1), col_type(0, 0), col_type(0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0); - this->value[1] = col_type(0, 1); - this->value[2] = col_type(0, 0); - this->value[3] = col_type(0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0), col_type(0, s), col_type(0, 0), col_type(0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0); - this->value[1] = col_type(0, s); - this->value[2] = col_type(0, 0); - this->value[3] = col_type(0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat - ( - T x0, T y0, - T x1, T y1, - T x2, T y2, - T x3, T y3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); - this->value[3] = col_type(x3, y3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat - ( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); - this->value[3] = col_type(x3, y3); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(vec<2, V0, Q> const& 
v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2, vec<2, V3, Q> const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); - this->value[3] = col_type(v3); -# endif - } - - // -- Conversion -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } 
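Note: the matrix-from-matrix conversion constructors deleted around this hunk implement GLSL's rule: components present in both sizes are copied, everything else is filled from the identity, so widening pads with zero columns (and a diagonal 1 where one exists) while narrowing truncates. A minimal C++ sketch of that observable behaviour, assuming the upstream <glm/glm.hpp> umbrella header rather than this vendored copy:

    #include <cassert>
    #include <glm/glm.hpp>

    int main()
    {
        glm::mat2 m2(1.0f, 2.0f,   // column 0
                     3.0f, 4.0f);  // column 1

        // Widening to 4 columns x 2 rows: the original two columns are
        // copied; the two new columns come from the identity and are all
        // zero here, since a 2-row matrix has no [2][2]/[3][3] diagonal.
        glm::mat4x2 m42(m2);
        assert(m42[0] == glm::vec2(1.0f, 2.0f));
        assert(m42[2] == glm::vec2(0.0f) && m42[3] == glm::vec2(0.0f));

        // Narrowing truncates: keep the first two columns, drop the rest.
        glm::mat2 back(m42);
        assert(back == m2);
        return 0;
    }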
- - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type & mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type const& mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q>& mat<4, 2, T, Q>::operator=(mat<4, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(mat<4, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(mat<4, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator++(int) - { - mat<4, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator--(int) - { - mat<4, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - -m[0], - -m[1], - -m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - 
GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar, - m[3] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar, - m[3] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar, - m[3] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar, - m[3] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v) - { - return typename mat<4, 2, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m) - { - return typename mat<4, 2, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1], - v.x * m[2][0] + v.y * m[2][1], - v.x * m[3][0] + v.y * m[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA30 = m1[3][0]; - T const SrcA31 = m1[3][1]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB03 = m2[0][3]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB13 = m2[1][3]; - - mat<2, 2, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3]); - } - 
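Note: the rectangular operator* overloads deleted here follow the usual dimension rule, written in GLM's column-count-first naming: mat<C1, R> * mat<C2, C1> yields mat<C2, R>, with result[c][r] equal to the sum over k of m1[k][r] * m2[c][k]. So glm::mat4x2 (4 columns, 2 rows) times glm::mat3x4 is a 2x4 by 4x3 product in math notation, giving glm::mat3x2. A minimal sketch that checks the product against a hand-rolled loop, again assuming upstream <glm/glm.hpp>:

    #include <cassert>
    #include <glm/glm.hpp>

    int main()
    {
        glm::mat4x2 a(1.0f); // scalar ctor: 1 on the short diagonal, 0 elsewhere
        glm::mat3x4 b(2.0f);

        glm::mat3x2 r = a * b; // (2x4) * (4x3) -> 2x3, i.e. mat3x2

        // result[c][row] must be the dot product of row `row` of a
        // with column `c` of b, exactly as the deleted overload computes it.
        for (int c = 0; c < 3; ++c)
            for (int row = 0; row < 2; ++row)
            {
                float acc = 0.0f;
                for (int k = 0; k < 4; ++k)
                    acc += a[k][row] * b[c][k];
                assert(r[c][row] == acc);
            }
        return 0;
    }

All operands are small integers, so the float comparisons are exact.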
- template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar, - m[3] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2], - scalar / m[3]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x3.hpp b/third_party/glm/detail/type_mat4x3.hpp deleted file mode 100755 index 16e4270..0000000 --- a/third_party/glm/detail/type_mat4x3.hpp +++ /dev/null @@ -1,171 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x3.hpp - -#pragma once - -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 3, T, Q> type; - typedef mat<3, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T const& x0, T const& y0, T const& z0, - T const& x1, T const& y1, T const& z1, - T const& x2, T const& y2, T const& z2, - T const& x3, T const& y3, T const& z3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3, - typename X4, typename Y4, typename Z4> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 const& x1, Y1 const& y1, Z1 const& z1, - X2 const& x2, Y2 const& y2, Z2 const& z2, - X3 const& x3, Y3 const& y3, Z3 const& z3, - X4 const& x4, Y4 const& y4, Z4 const& z4); - - 
template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, V1, Q> const& v1, - vec<3, V2, Q> const& v2, - vec<3, V3, Q> const& v3, - vec<3, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 3, T, Q>& operator++(); - GLM_FUNC_DECL mat<4, 3, T, Q>& operator--(); - GLM_FUNC_DECL mat<4, 3, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 3, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 3, T, Q>::col_type operator*(mat<4, 3, T, Q> const& m, typename mat<4, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 3, T, Q>::row_type operator*(typename mat<4, 3, T, Q>::col_type const& v, mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); -}//namespace glm - -#ifndef 
GLM_EXTERNAL_TEMPLATE -#include "type_mat4x3.inl" -#endif //GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_mat4x3.inl b/third_party/glm/detail/type_mat4x3.inl deleted file mode 100755 index 11b1ee3..0000000 --- a/third_party/glm/detail/type_mat4x3.inl +++ /dev/null @@ -1,598 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1), col_type(0, 0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0); - this->value[1] = col_type(0, 1, 0); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0, 0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(T const& s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s), col_type(0, 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0); - this->value[1] = col_type(0, s, 0); - this->value[2] = col_type(0, 0, s); - this->value[3] = col_type(0, 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat - ( - T const& x0, T const& y0, T const& z0, - T const& x1, T const& y1, T const& z1, - T const& x2, T const& y2, T const& z2, - T const& x3, T const& y3, T const& z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); - this->value[3] = col_type(x3, y3, z3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, typename Z0, - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat - ( - X0 const& x0, Y0 const& y0, Z0 const& z0, - X1 const& x1, Y1 const& y1, Z1 const& z1, - X2 const& x2, Y2 const& y2, Z2 const& z2, - X3 const& x3, Y3 const& y3, Z3 const& z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); - this->value[3] = col_type(x3, y3, z3); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, 
Q> const& v2, vec<3, V3, Q> const& v3, vec<3, V4, Q> const& v4) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); - this->value[3] = col_type(v4); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(m[3], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - 
this->value[2] = col_type(m[2], 1); - this->value[3] = col_type(m[3], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type & mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type const& mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q>& mat<4, 3, T, Q>::operator=(mat<4, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(mat<4, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(mat<4, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator++(int) - { - mat<4, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator--(int) - { - mat<4, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - -m[0], - -m[1], - 
-m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] - s, - m[1] - s, - m[2] - s, - m[3] - s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type operator* - ( - mat<4, 3, T, Q> const& m, - typename mat<4, 3, T, Q>::row_type const& v) - { - return typename mat<4, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * v.w); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::row_type operator* - ( - typename mat<4, 3, T, Q>::col_type const& v, - mat<4, 3, T, Q> const& m) - { - return typename mat<4, 3, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2], - v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2], - v.x * m[3][0] + v.y * m[3][1] + v.z * m[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA02 = m1[0][2]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA12 = m1[1][2]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA22 = m1[2][2]; - T const SrcA30 = m1[3][0]; - T const SrcA31 = m1[3][1]; - T const SrcA32 = m1[3][2]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB03 = m2[0][3]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB13 = m2[1][3]; - T const SrcB20 = m2[2][0]; - T const SrcB21 = m2[2][1]; - T const SrcB22 = m2[2][2]; - T const SrcB23 = m2[2][3]; - - mat<3, 3, T, Q> 
Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02 + SrcA32 * SrcB03; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12 + SrcA32 * SrcB13; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22 + SrcA30 * SrcB23; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22 + SrcA31 * SrcB23; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22 + SrcA32 * SrcB23; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2] + m1[3][2] * m2[3][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] / s, - m[1] / s, - m[2] / s, - m[3] / s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - s / m[0], - s / m[1], - s / m[2], - s / m[3]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x4.hpp b/third_party/glm/detail/type_mat4x4.hpp deleted file mode 100755 index 3517f9f..0000000 --- a/third_party/glm/detail/type_mat4x4.hpp +++ /dev/null @@ -1,189 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x4.hpp - -#pragma once - -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 4, T, Q> type; - typedef mat<4, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR 
length_type length(){return 4;} - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T const& x0, T const& y0, T const& z0, T const& w0, - T const& x1, T const& y1, T const& z1, T const& w1, - T const& x2, T const& y2, T const& z2, T const& w2, - T const& x3, T const& y3, T const& z3, T const& w3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3, - typename X4, typename Y4, typename Z4, typename W4> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, - X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, - X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, - X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, V1, Q> const& v1, - vec<4, V2, Q> const& v2, - vec<4, V3, Q> const& v3, - vec<4, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(mat<4, 4, U, Q> const& m); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 4, T, Q> & operator++(); - GLM_FUNC_DECL mat<4, 4, T, Q> & operator--(); - GLM_FUNC_DECL mat<4, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> 
operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator*(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator*(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat4x4.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_mat4x4.inl b/third_party/glm/detail/type_mat4x4.inl deleted file mode 100755 index e38b87f..0000000 --- a/third_party/glm/detail/type_mat4x4.inl +++ /dev/null @@ -1,706 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(T const& s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0), col_type(0, 0, 0, s)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - 
this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); - this->value[2] = col_type(0, 0, s, 0); - this->value[3] = col_type(0, 0, 0, s); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat - ( - T const& x0, T const& y0, T const& z0, T const& w0, - T const& x1, T const& y1, T const& z1, T const& w1, - T const& x2, T const& y2, T const& z2, T const& w2, - T const& x3, T const& y3, T const& z3, T const& w3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2), - col_type(x3, y3, z3, w3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); - this->value[3] = col_type(x3, y3, z3, w3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - // -- Conversions -- - - template - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3, - typename X4, typename Y4, typename Z4, typename W4> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat - ( - X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, - X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, - X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, - X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1, w1), col_type(x2, y2, z2, w2), col_type(x3, y3, z3, w3), col_type(x4, y4, z4, w4)} -# endif - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 5th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || 
GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 6th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 7th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 8th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 9th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 10th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 11th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 12th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 13th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 14th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 15th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 16th parameter type invalid."); - -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1, w1); - this->value[1] = col_type(x2, y2, z2, w2); - this->value[2] = col_type(x3, y3, z3, w3); - this->value[3] = col_type(x4, y4, z4, w4); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2, vec<4, V3, Q> const& v3, vec<4, V4, Q> const& v4) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} -# endif - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer 
types, 4th parameter type invalid."); - -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); - this->value[3] = col_type(v4); -# endif - } - - // -- Matrix conversions -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(m[3], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - 
this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); - this->value[3] = col_type(m[3], 1); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type & mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type const& mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary arithmetic operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator=(mat<4, 4, U, Q> const& m) - { - //memcpy could be faster - //memcpy(&this->value, &m.value, 16 * sizeof(valType)); - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(mat<4, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(mat<4, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(mat<4, 4, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(mat<4, 4, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator++(int) - { - mat<4, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator--(int) - { - mat<4, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary constant operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m) - { - return 
mat<4, 4, T, Q>( - -m[0], - -m[1], - -m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 4, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] - s, - m[1] - s, - m[2] - s, - m[3] - s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - s - m[0], - s - m[1], - s - m[2], - s - m[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const & s) - { - return mat<4, 4, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator* - ( - mat<4, 4, T, Q> const& m, - typename mat<4, 4, T, Q>::row_type const& v - ) - { -/* - __m128 v0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 v1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 v2 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 v3 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(m[0].data, v0); - __m128 m1 = _mm_mul_ps(m[1].data, v1); - __m128 a0 = _mm_add_ps(m0, m1); - - __m128 m2 = _mm_mul_ps(m[2].data, v2); - __m128 m3 = _mm_mul_ps(m[3].data, v3); - __m128 a1 = _mm_add_ps(m2, m3); - - __m128 a2 = _mm_add_ps(a0, a1); - - return typename mat<4, 4, T, Q>::col_type(a2); -*/ - - typename mat<4, 4, T, Q>::col_type const Mov0(v[0]); - typename mat<4, 4, T, Q>::col_type const Mov1(v[1]); - typename mat<4, 4, T, Q>::col_type const Mul0 = m[0] * Mov0; - typename mat<4, 4, T, Q>::col_type const Mul1 = m[1] * Mov1; - typename mat<4, 4, T, Q>::col_type const Add0 = Mul0 + Mul1; - typename mat<4, 4, T, Q>::col_type const Mov2(v[2]); - typename mat<4, 4, T, Q>::col_type const Mov3(v[3]); - typename mat<4, 4, T, Q>::col_type const Mul2 = m[2] * Mov2; - typename mat<4, 4, T, Q>::col_type const Mul3 = m[3] * Mov3; - typename mat<4, 4, T, Q>::col_type const Add1 = Mul2 + Mul3; - typename mat<4, 4, T, Q>::col_type const Add2 = Add0 + Add1; - return Add2; - -/* - return typename mat<4, 4, T, Q>::col_type( - m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0] * v[3], - m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1] * v[3], - m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2] * v[3], - m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3] * v[3]); -*/ - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator* - ( - typename mat<4, 4, T, Q>::col_type const& v, - mat<4, 
4, T, Q> const& m - ) - { - return typename mat<4, 4, T, Q>::row_type( - m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3], - m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3], - m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3], - m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2] + m1[3][3] * m2[2][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - typename mat<4, 4, T, Q>::col_type const SrcA0 = m1[0]; - typename mat<4, 4, T, Q>::col_type const SrcA1 = m1[1]; - typename mat<4, 4, T, Q>::col_type const SrcA2 = m1[2]; - typename mat<4, 4, T, Q>::col_type const SrcA3 = m1[3]; - - typename mat<4, 4, T, Q>::col_type const SrcB0 = m2[0]; - typename mat<4, 4, T, Q>::col_type const SrcB1 = m2[1]; - typename mat<4, 4, T, Q>::col_type const SrcB2 = m2[2]; - typename mat<4, 4, T, Q>::col_type const SrcB3 = m2[3]; - - mat<4, 4, T, Q> Result; - Result[0] = SrcA0 * SrcB0[0] + SrcA1 * SrcB0[1] + SrcA2 * SrcB0[2] + SrcA3 * SrcB0[3]; - Result[1] = SrcA0 * SrcB1[0] + SrcA1 * SrcB1[1] + SrcA2 * SrcB1[2] + SrcA3 * SrcB1[3]; - Result[2] = SrcA0 * SrcB2[0] + SrcA1 * SrcB2[1] + SrcA2 * SrcB2[2] + SrcA3 * SrcB2[3]; - Result[3] = SrcA0 * SrcB3[0] + SrcA1 * SrcB3[1] + SrcA2 * SrcB3[2] + SrcA3 * SrcB3[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] 
/ s,
-			m[1] / s,
-			m[2] / s,
-			m[3] / s);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m)
-	{
-		return mat<4, 4, T, Q>(
-			s / m[0],
-			s / m[1],
-			s / m[2],
-			s / m[3]);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v)
-	{
-		return inverse(m) * v;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m)
-	{
-		return v * inverse(m);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
-	{
-		mat<4, 4, T, Q> m1_copy(m1);
-		return m1_copy /= m2;
-	}
-
-	// -- Boolean operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
-	{
-		return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2)
-	{
-		return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]);
-	}
-}//namespace glm
-
-#if GLM_CONFIG_SIMD == GLM_ENABLE
-#	include "type_mat4x4_simd.inl"
-#endif
diff --git a/third_party/glm/detail/type_mat4x4_simd.inl b/third_party/glm/detail/type_mat4x4_simd.inl
deleted file mode 100755
index fb3a16f..0000000
--- a/third_party/glm/detail/type_mat4x4_simd.inl
+++ /dev/null
@@ -1,6 +0,0 @@
-/// @ref core
-
-namespace glm
-{
-
-}//namespace glm
diff --git a/third_party/glm/detail/type_quat.hpp b/third_party/glm/detail/type_quat.hpp
deleted file mode 100755
index 0e60bc3..0000000
--- a/third_party/glm/detail/type_quat.hpp
+++ /dev/null
@@ -1,186 +0,0 @@
-/// @ref core
-/// @file glm/detail/type_quat.hpp
-
-#pragma once
-
-// Dependency:
-#include "../detail/type_mat3x3.hpp"
-#include "../detail/type_mat4x4.hpp"
-#include "../detail/type_vec3.hpp"
-#include "../detail/type_vec4.hpp"
-#include "../ext/vector_relational.hpp"
-#include "../ext/quaternion_relational.hpp"
-#include "../gtc/constants.hpp"
-#include "../gtc/matrix_transform.hpp"
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	struct qua
-	{
-		// -- Implementation detail --
-
-		typedef qua<T, Q> type;
-		typedef T value_type;
-
-		// -- Data --
-
-#		if GLM_SILENT_WARNINGS == GLM_ENABLE
-#			if GLM_COMPILER & GLM_COMPILER_GCC
-#				pragma GCC diagnostic push
-#				pragma GCC diagnostic ignored "-Wpedantic"
-#			elif GLM_COMPILER & GLM_COMPILER_CLANG
-#				pragma clang diagnostic push
-#				pragma clang diagnostic ignored "-Wgnu-anonymous-struct"
-#				pragma clang diagnostic ignored "-Wnested-anon-types"
-#			elif GLM_COMPILER & GLM_COMPILER_VC
-#				pragma warning(push)
-#				pragma warning(disable: 4201)  // nonstandard extension used : nameless struct/union
-#			endif
-#		endif
-
-#		if GLM_LANG & GLM_LANG_CXXMS_FLAG
-		union
-		{
-#			ifdef GLM_FORCE_QUAT_DATA_WXYZ
-			struct { T w, x, y, z; };
-#			else
-			struct { T x, y, z, w; };
-#			endif
-
-			typename detail::storage<4, T, detail::is_aligned<Q>::value>::type data;
-		};
-#		else
-#			ifdef GLM_FORCE_QUAT_DATA_WXYZ
-		T w, x, y, z;
-#			else
-		T x, y, z, w;
-#			endif
-#		endif
-
-#		if GLM_SILENT_WARNINGS == GLM_ENABLE
-#			if GLM_COMPILER & GLM_COMPILER_CLANG
-#				pragma clang diagnostic pop
-#			elif GLM_COMPILER & GLM_COMPILER_GCC
-#				pragma GCC diagnostic pop
-#			elif GLM_COMPILER & GLM_COMPILER_VC
-#				pragma warning(pop)
-#			endif
-#		endif
-
-		// -- Component accesses --
-
-		typedef length_t length_type;
-
-		/// Return the count of components of a
quaternion - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} - - GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR qua(qua const& q) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR qua(qua const& q); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua(T s, vec<3, T, Q> const& v); - GLM_FUNC_DECL GLM_CONSTEXPR qua(T w, T x, T y, T z); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(qua const& q); - - /// Explicit conversion operators -# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS - GLM_FUNC_DECL explicit operator mat<3, 3, T, Q>() const; - GLM_FUNC_DECL explicit operator mat<4, 4, T, Q>() const; -# endif - - /// Create a quaternion from two normalized axis - /// - /// @param u A first normalized axis - /// @param v A second normalized axis - /// @see gtc_quaternion - /// @see http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors - GLM_FUNC_DECL qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v); - - /// Build a quaternion from euler angles (pitch, yaw, roll), in radians. - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(vec<3, T, Q> const& eulerAngles); - GLM_FUNC_DECL GLM_EXPLICIT qua(mat<3, 3, T, Q> const& q); - GLM_FUNC_DECL GLM_EXPLICIT qua(mat<4, 4, T, Q> const& q); - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator+=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator-=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(U s); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator/=(U s); - }; - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, T const& s); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(T const& s, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator/(qua const& q, T const& s); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2); -} //namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_quat.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_quat.inl 
b/third_party/glm/detail/type_quat.inl deleted file mode 100755 index 67b9310..0000000 --- a/third_party/glm/detail/type_quat.inl +++ /dev/null @@ -1,408 +0,0 @@ -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include "../ext/quaternion_geometric.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct genTypeTrait > - { - static const genTypeEnum GENTYPE = GENTYPE_QUAT; - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(qua const& a, qua const& b) - { - vec<4, T, Q> tmp(a.w * b.w, a.x * b.x, a.y * b.y, a.z * b.z); - return (tmp.x + tmp.y) + (tmp.z + tmp.w); - } - }; - - template - struct compute_quat_add - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) - { - return qua(q.w + p.w, q.x + p.x, q.y + p.y, q.z + p.z); - } - }; - - template - struct compute_quat_sub - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) - { - return qua(q.w - p.w, q.x - p.x, q.y - p.y, q.z - p.z); - } - }; - - template - struct compute_quat_mul_scalar - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) - { - return qua(q.w * s, q.x * s, q.y * s, q.z * s); - } - }; - - template - struct compute_quat_div_scalar - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) - { - return qua(q.w / s, q.x / s, q.y / s, q.z / s); - } - }; - - template - struct compute_quat_mul_vec4 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(qua const& q, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); - } - }; -}//namespace detail - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & qua::operator[](typename qua::length_type i) - { - assert(i >= 0 && i < this->length()); -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - return (&w)[i]; -# else - return (&x)[i]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& qua::operator[](typename qua::length_type i) const - { - assert(i >= 0 && i < this->length()); -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - return (&w)[i]; -# else - return (&x)[i]; -# endif - } - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(1), x(0), y(0), z(0) -# else - : x(0), y(0), z(0), w(1) -# endif -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(q.w), x(q.x), y(q.y), z(q.z) -# else - : x(q.x), y(q.y), z(q.z), w(q.w) -# endif - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(q.w), x(q.x), y(q.y), z(q.z) -# else - : x(q.x), y(q.y), z(q.z), w(q.w) -# endif - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T s, vec<3, T, Q> const& v) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(s), x(v.x), y(v.y), z(v.z) -# else - : x(v.x), y(v.y), z(v.z), w(s) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _w, T _x, T _y, T _z) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(_w), x(_x), y(_y), z(_z) -# else - : x(_x), y(_y), z(_z), w(_w) -# endif - {} - - // -- Conversion constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(static_cast(q.w)), x(static_cast(q.x)), y(static_cast(q.y)), 
-		z(static_cast<T>(q.z))
-#	else
-		: x(static_cast<T>(q.x)), y(static_cast<T>(q.y)), z(static_cast<T>(q.z)), w(static_cast<T>(q.w))
-#	endif
-	{}
-
-	//template<typename valType>
-	//GLM_FUNC_QUALIFIER qua<valType>::qua
-	//(
-	//	valType const& pitch,
-	//	valType const& yaw,
-	//	valType const& roll
-	//)
-	//{
-	//	vec<3, valType> eulerAngle(pitch * valType(0.5), yaw * valType(0.5), roll * valType(0.5));
-	//	vec<3, valType> c = glm::cos(eulerAngle * valType(0.5));
-	//	vec<3, valType> s = glm::sin(eulerAngle * valType(0.5));
-	//
-	//	this->w = c.x * c.y * c.z + s.x * s.y * s.z;
-	//	this->x = s.x * c.y * c.z - c.x * s.y * s.z;
-	//	this->y = c.x * s.y * c.z + s.x * c.y * s.z;
-	//	this->z = c.x * c.y * s.z - s.x * s.y * c.z;
-	//}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v)
-	{
-		T norm_u_norm_v = sqrt(dot(u, u) * dot(v, v));
-		T real_part = norm_u_norm_v + dot(u, v);
-		vec<3, T, Q> t;
-
-		if(real_part < static_cast<T>(1.e-6f) * norm_u_norm_v)
-		{
-			// If u and v are exactly opposite, rotate 180 degrees
-			// around an arbitrary orthogonal axis. Axis normalisation
-			// can happen later, when we normalise the quaternion.
-			real_part = static_cast<T>(0);
-			t = abs(u.x) > abs(u.z) ? vec<3, T, Q>(-u.y, u.x, static_cast<T>(0)) : vec<3, T, Q>(static_cast<T>(0), -u.z, u.y);
-		}
-		else
-		{
-			// Otherwise, build quaternion the standard way.
-			t = cross(u, v);
-		}
-
-		*this = normalize(qua<T, Q>(real_part, t.x, t.y, t.z));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(vec<3, T, Q> const& eulerAngle)
-	{
-		vec<3, T, Q> c = glm::cos(eulerAngle * T(0.5));
-		vec<3, T, Q> s = glm::sin(eulerAngle * T(0.5));
-
-		this->w = c.x * c.y * c.z + s.x * s.y * s.z;
-		this->x = s.x * c.y * c.z - c.x * s.y * s.z;
-		this->y = c.x * s.y * c.z + s.x * c.y * s.z;
-		this->z = c.x * c.y * s.z - s.x * s.y * c.z;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<3, 3, T, Q> const& m)
-	{
-		*this = quat_cast(m);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<4, 4, T, Q> const& m)
-	{
-		*this = quat_cast(m);
-	}
-
-#	if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<3, 3, T, Q>() const
-	{
-		return mat3_cast(*this);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<4, 4, T, Q>() const
-	{
-		return mat4_cast(*this);
-	}
-#	endif//GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
-
-	// -- Unary arithmetic operators --
-
-#	if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<T, Q> const& q)
-	{
-		this->w = q.w;
-		this->x = q.x;
-		this->y = q.y;
-		this->z = q.z;
-		return *this;
-	}
-#	endif
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<U, Q> const& q)
-	{
-		this->w = static_cast<T>(q.w);
-		this->x = static_cast<T>(q.x);
-		this->y = static_cast<T>(q.y);
-		this->z = static_cast<T>(q.z);
-		return *this;
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator+=(qua<U, Q> const& q)
-	{
-		return (*this = detail::compute_quat_add<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator-=(qua<U, Q> const& q)
-	{
-		return (*this = detail::compute_quat_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(qua<U, Q> const& r)
-	{
-		qua<T, Q> const p(*this);
-		qua<T, Q> const q(r);
-
-		// Hamilton product: w carries the scalar part, (x, y, z) the vector part.
-		this->w = p.w * q.w - p.x * q.x - p.y * q.y - p.z * q.z;
-		this->x = p.w * q.x + p.x * q.w + p.y * q.z - p.z * q.y;
-		this->y = p.w * q.y + p.y * q.w + p.z * q.x - p.x * q.z;
-		this->z = p.w * q.z + p.z * q.w + p.x * q.y - p.y * q.x;
-		return *this;
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(U s)
-	{
-		return (*this = detail::compute_quat_mul_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<T>(s)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator/=(U s)
-	{
-		return (*this = detail::compute_quat_div_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<T>(s)));
-	}
-
-	// -- Unary bit operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q)
-	{
-		return q;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q)
-	{
-		return qua<T, Q>(-q.w, -q.x, -q.y, -q.z);
-	}
-
-	// -- Binary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) += p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) -= p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) *= p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(qua<T, Q> const& q, vec<3, T, Q> const& v)
-	{
-		vec<3, T, Q> const QuatVector(q.x, q.y, q.z);
-		vec<3, T, Q> const uv(glm::cross(QuatVector, v));
-		vec<3, T, Q> const uuv(glm::cross(QuatVector, uv));
-
-		return v + ((uv * q.w) + uuv) * static_cast<T>(2);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua<T, Q> const& q)
-	{
-		return glm::inverse(q) * v;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(qua<T, Q> const& q, vec<4, T, Q> const& v)
-	{
-		return detail::compute_quat_mul_vec4<T, Q, detail::is_aligned<Q>::value>::call(q, v);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua<T, Q> const& q)
-	{
-		return glm::inverse(q) * v;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, T const& s)
-	{
-		return qua<T, Q>(
-			q.w * s, q.x * s, q.y * s, q.z * s);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(T const& s, qua<T, Q> const& q)
-	{
-		return q * s;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator/(qua<T, Q> const& q, T const& s)
-	{
-		return qua<T, Q>(
-			q.w / s, q.x / s, q.y / s, q.z / s);
-	}
-
-	// -- Boolean operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(qua<T, Q> const& q1, qua<T, Q> const& q2)
-	{
-		return q1.x == q2.x && q1.y == q2.y && q1.z == q2.z && q1.w == q2.w;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(qua<T, Q> const& q1, qua<T, Q> const& q2)
-	{
-		return q1.x != q2.x || q1.y != q2.y || q1.z != q2.z || q1.w != q2.w;
-	}
-}//namespace glm
-
-#if GLM_CONFIG_SIMD == GLM_ENABLE
-#	include "type_quat_simd.inl"
-#endif
-
diff --git a/third_party/glm/detail/type_quat_simd.inl b/third_party/glm/detail/type_quat_simd.inl
deleted file mode 100755
index 3333e59..0000000
--- a/third_party/glm/detail/type_quat_simd.inl
+++ /dev/null
@@ -1,188 +0,0 @@
-/// @ref core
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-namespace glm{
-namespace detail
-{
-/*
-	template<qualifier Q>
-	struct compute_quat_mul<float, Q, true>
-	{
-		static qua<float, Q> call(qua<float, Q> const& q1, qua<float, Q> const& q2)
-		{
-			// SSE2 STATS: 11 shuffle, 8 mul, 8 add
-			// SSE4 STATS: 3 shuffle, 4 mul, 4 dpps
-
-			__m128 const mul0 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(0, 1, 2, 3)));
-			__m128 const mul1 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(1, 0, 3, 2)));
-			__m128 const mul2 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(2, 3, 0, 1)));
-			__m128 const mul3 =
_mm_mul_ps(q1.Data, q2.Data); - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff); - __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff); - __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff); - __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff); -# else - __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f)); - __m128 const add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul4, mul4)); - __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1)); - - __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f)); - __m128 const add1 = _mm_add_ps(mul1, _mm_movehl_ps(mul5, mul5)); - __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1)); - - __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f)); - __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6)); - __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1)); - - __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f)); - __m128 const add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul7, mul7)); - __m128 const add7 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1)); - #endif - - // This SIMD code is a politically correct way of doing this, but in every test I've tried it has been slower than - // the final code below. I'll keep this here for reference - maybe somebody else can do something better... - // - //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0)); - //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0)); - // - //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0)); - - qua Result; - _mm_store_ss(&Result.x, add4); - _mm_store_ss(&Result.y, add5); - _mm_store_ss(&Result.z, add6); - _mm_store_ss(&Result.w, add7); - return Result; - } - }; -*/ - - template - struct compute_quat_add - { - static qua call(qua const& q, qua const& p) - { - qua Result; - Result.data = _mm_add_ps(q.data, p.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_add - { - static qua call(qua const& a, qua const& b) - { - qua Result; - Result.data = _mm256_add_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_quat_sub - { - static qua call(qua const& q, qua const& p) - { - vec<4, float, Q> Result; - Result.data = _mm_sub_ps(q.data, p.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_sub - { - static qua call(qua const& a, qua const& b) - { - qua Result; - Result.data = _mm256_sub_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_quat_mul_scalar - { - static qua call(qua const& q, float s) - { - vec<4, float, Q> Result; - Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_mul_scalar - { - static qua call(qua const& q, double s) - { - qua Result; - Result.data = _mm256_mul_pd(q.data, _mm_set_ps1(s)); - return Result; - } - }; -# endif - - template - struct compute_quat_div_scalar - { - static qua call(qua const& q, float s) - { - vec<4, float, Q> Result; - Result.data = _mm_div_ps(q.data, _mm_set_ps1(s)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_div_scalar - { - static qua call(qua const& q, double s) - { - qua Result; - Result.data = _mm256_div_pd(q.data, _mm_set_ps1(s)); - 
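/* Note: _mm_set_ps1(s) broadcasts a float into a __m128, while _mm256_mul_pd
   and _mm256_div_pd in these AVX double specializations expect a __m256d, so
   they appear unable to compile as written (the float specializations likewise
   declare a vec<4, float, Q> Result yet return a qua<float, Q>). A minimal
   sketch of the presumably intended body, assuming _mm256_set1_pd is the
   matching double-broadcast intrinsic:

	qua<double, Q> Result;
	Result.data = _mm256_div_pd(q.data, _mm256_set1_pd(s));
	return Result;
*/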
return Result; - } - }; -# endif - - template - struct compute_quat_mul_vec4 - { - static vec<4, float, Q> call(qua const& q, vec<4, float, Q> const& v) - { - __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3)); - __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2)); - __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); - - __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); - __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); - __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); - - __m128 const two = _mm_set1_ps(2.0f); - uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); - uuv = _mm_mul_ps(uuv, two); - - vec<4, float, Q> Result; - Result.data = _mm_add_ps(v.Data, _mm_add_ps(uv, uuv)); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - diff --git a/third_party/glm/detail/type_vec1.hpp b/third_party/glm/detail/type_vec1.hpp deleted file mode 100755 index 51163f1..0000000 --- a/third_party/glm/detail/type_vec1.hpp +++ /dev/null @@ -1,308 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec1.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<1, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<1, T, Q> type; - typedef vec<1, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# if GLM_CONFIG_XYZW_ONLY - T x; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - T x; - T r; - T s; - - typename detail::storage<1, T, detail::is_aligned::value>::type data; -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - _GLM_SWIZZLE1_2_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_2_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_2_MEMBERS(T, Q, s) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, s) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, s) -# endif -*/ - }; -# else - union {T x, r, s;}; -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC1(T, Q) -# endif -*/ -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - /// Return the count of components of the vector - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 1;} - - 
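/* length() reports the component count, always 1 for vec<1>, and the
   operator[] overloads declared below return x regardless of the index
   (see their definitions in type_vec1.inl). Illustrative usage only, with
   hypothetical values:

	vec<1, float, defaultp> v(3.0f);
	for (length_t i = 0; i < v.length(); ++i)
		v[i] *= 2.0f; // touches the single component x
*/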
GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<1, U, P> const& v); - - // -- Swizzle constructors -- -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<1, T, Q, E0, -1,-2,-3> const& that) - { - *this = that(); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -*/ - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(vec<1, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR 
vec<1, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(vec<1, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR 
vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec1.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec1.inl b/third_party/glm/detail/type_vec1.inl deleted file mode 100755 index d0f49fd..0000000 --- a/third_party/glm/detail/type_vec1.inl +++ /dev/null @@ -1,551 +0,0 @@ -/// @ref core - -#include "./compute_vector_relational.hpp" - -namespace glm -{ - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, Q> const& v) - : x(v.x) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, P> const& v) - : x(v.x) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(T scalar) - : x(scalar) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<2, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<3, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) - { - return x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) const - { - return x; - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, T, Q> const& v) - { - this->x = v.x; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, U, Q> const& v) - { - this->x = static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(U scalar) - { - this->x += static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(vec<1, U, Q> const& v) - { - this->x += static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(U scalar) - { - this->x -= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, 
Q>::operator-=(vec<1, U, Q> const& v) - { - this->x -= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(U scalar) - { - this->x *= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(vec<1, U, Q> const& v) - { - this->x *= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(U scalar) - { - this->x /= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(vec<1, U, Q> const& v) - { - this->x /= static_cast(v.x); - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator++() - { - ++this->x; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator--() - { - --this->x; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator++(int) - { - vec<1, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator--(int) - { - vec<1, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(U scalar) - { - this->x %= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(vec<1, U, Q> const& v) - { - this->x %= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(U scalar) - { - this->x &= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(vec<1, U, Q> const& v) - { - this->x &= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(U scalar) - { - this->x |= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(vec<1, U, Q> const& v) - { - this->x |= U(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(U scalar) - { - this->x ^= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(vec<1, U, Q> const& v) - { - this->x ^= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(U scalar) - { - this->x <<= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - this->x <<= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(U scalar) - { - this->x >>= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - this->x >>= static_cast(v.x); - return *this; - } - - // -- Unary constant operators -- - - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - -v.x); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x + scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar + v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x + v2.x); - } - - //operator- - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x - scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar - v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x - v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x * scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar * v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x * v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x / scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar / v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x / v2.x); - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x % scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar % v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x % v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x & scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar & v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x & v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x | scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar | v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> 
const& v2) - { - return vec<1, T, Q>( - v1.x | v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x ^ scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar ^ v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x ^ v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - static_cast(v.x << scalar)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar << v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x << v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x >> scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar >> v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x >> v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - ~v.x); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return detail::compute_equal::is_iec559>::call(v1.x, v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return !(v1 == v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) - { - return vec<1, bool, Q>(v1.x && v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) - { - return vec<1, bool, Q>(v1.x || v2.x); - } -}//namespace glm diff --git a/third_party/glm/detail/type_vec2.hpp b/third_party/glm/detail/type_vec2.hpp deleted file mode 100755 index 52ef408..0000000 --- a/third_party/glm/detail/type_vec2.hpp +++ /dev/null @@ -1,399 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec2.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<2, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<2, T, Q> type; - typedef vec<2, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# 
if GLM_CONFIG_XYZW_ONLY - T x, y; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - struct{ T x, y; }; - struct{ T r, g; }; - struct{ T s, t; }; - - typename detail::storage<2, T, detail::is_aligned::value>::type data; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - GLM_SWIZZLE2_2_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_2_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_2_MEMBERS(T, Q, s, t) - GLM_SWIZZLE2_3_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_3_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_3_MEMBERS(T, Q, s, t) - GLM_SWIZZLE2_4_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_4_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_4_MEMBERS(T, Q, s, t) -# endif - }; -# else - union {T x, r, s;}; - union {T y, g, t;}; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) -# endif//GLM_CONFIG_SWIZZLE -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - /// Return the count of components of the vector - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} - - GLM_FUNC_DECL GLM_CONSTEXPR T& operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, B y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, B y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, vec<1, B, Q> const& y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, vec<1, B, Q> const& y); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); - - // -- Swizzle constructors -- -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1,-1,-2> const& that) - { - *this = that(); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<1, U, Q> 
const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<2, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<2, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& 
v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> 
operator^(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec2.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec2.inl b/third_party/glm/detail/type_vec2.inl deleted file mode 100755 index 8e65d6b..0000000 --- a/third_party/glm/detail/type_vec2.inl +++ /dev/null @@ -1,913 +0,0 @@ -/// @ref core - -#include "./compute_vector_relational.hpp" - -namespace glm -{ - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0), y(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, Q> const& v) - : x(v.x), y(v.y) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, P> const& v) - : x(v.x), y(v.y) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T scalar) - : x(scalar), y(scalar) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T _x, T _y) - : x(_x), y(_y) - {} - - // -- Conversion scalar constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, B _y) - : x(static_cast(_x)) - , y(static_cast(_y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, B _y) - : 
x(static_cast(_x.x)) - , y(static_cast(_y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, vec<1, B, Q> const& _y) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, vec<1, B, Q> const& _y) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<3, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - } - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) const - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - } - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, T, Q> const& v) - { - this->x = v.x; - this->y = v.y; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, U, Q> const& v) - { - this->x = static_cast(v.x); - this->y = static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(U scalar) - { - this->x += static_cast(scalar); - this->y += static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<1, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<2, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(U scalar) - { - this->x -= static_cast(scalar); - this->y -= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<1, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<2, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(U scalar) - { - this->x *= static_cast(scalar); - this->y *= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<1, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.x); - return *this; - } - - template - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<2, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(U scalar) - { - this->x /= static_cast(scalar); - this->y /= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<1, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<2, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.y); - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator++() - { - ++this->x; - ++this->y; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator--() - { - --this->x; - --this->y; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator++(int) - { - vec<2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator--(int) - { - vec<2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(U scalar) - { - this->x %= static_cast(scalar); - this->y %= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<1, U, Q> const& v) - { - this->x %= static_cast(v.x); - this->y %= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<2, U, Q> const& v) - { - this->x %= static_cast(v.x); - this->y %= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(U scalar) - { - this->x &= static_cast(scalar); - this->y &= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<1, U, Q> const& v) - { - this->x &= static_cast(v.x); - this->y &= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<2, U, Q> const& v) - { - this->x &= static_cast(v.x); - this->y &= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(U scalar) - { - this->x |= static_cast(scalar); - this->y |= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<1, U, Q> const& v) - { - this->x |= static_cast(v.x); - this->y |= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<2, U, Q> const& v) - { - this->x |= static_cast(v.x); - this->y |= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(U scalar) - { - this->x ^= static_cast(scalar); - this->y ^= static_cast(scalar); - return *this; - } - - template - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<1, U, Q> const& v) - { - this->x ^= static_cast(v.x); - this->y ^= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<2, U, Q> const& v) - { - this->x ^= static_cast(v.x); - this->y ^= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(U scalar) - { - this->x <<= static_cast(scalar); - this->y <<= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<2, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(U scalar) - { - this->x >>= static_cast(scalar); - this->y >>= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<2, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.y); - return *this; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - -v.x, - -v.y); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x + scalar, - v.y + scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.y + v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar + v.x, - scalar + v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.x + v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.y + v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x - scalar, - v.y - scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.y - v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar - v.x, - scalar - v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.x - v2.y); - 
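/* Note: in the mixed vec<1>/vec<2> overloads here, the vec<1> operand
   broadcasts its single component across both lanes, mirroring GLSL-style
   scalar promotion. Illustrative sketch, with hypothetical values:

	vec<1, int, defaultp> a(10);
	vec<2, int, defaultp> b(3, 4);
	vec<2, int, defaultp> d = a - b; // (10 - 3, 10 - 4) == (7, 6)
*/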
} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.y - v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x * scalar, - v.y * scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.y * v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar * v.x, - scalar * v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.x * v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.y * v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x / scalar, - v.y / scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.y / v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar / v.x, - scalar / v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.x / v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.y / v2.y); - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x % scalar, - v.y % scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.y % v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar % v.x, - scalar % v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.x % v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.y % v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x & scalar, - v.y & scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.y & v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar & v.x, - scalar & v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.x & v2.y); - } - 
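/* Note: each binary bit operator in this file follows the same five-overload
   pattern: vec2 op scalar, vec2 op vec1, scalar op vec2, vec1 op vec2 (the
   vec<1> forms broadcast their single component), and the purely componentwise
   vec2 op vec2 that follows. Illustrative call forms, with hypothetical values:

	vec<2, unsigned int, defaultp> m(0xF0u, 0x0Fu);
	vec<2, unsigned int, defaultp> r1 = m & 0x3Cu;                                        // (0x30u, 0x0Cu)
	vec<2, unsigned int, defaultp> r2 = m & vec<2, unsigned int, defaultp>(0xFFu, 0x00u); // (0xF0u, 0x00u)
*/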
- template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.y & v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x | scalar, - v.y | scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.y | v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar | v.x, - scalar | v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.x | v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.y | v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x ^ scalar, - v.y ^ scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.y ^ v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar ^ v.x, - scalar ^ v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.x ^ v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.y ^ v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x << scalar, - v.y << scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.y << v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar << v.x, - scalar << v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.x << v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.y << v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x >> scalar, - v.y >> scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.y >> v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar >> v.x, - scalar >> v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.x >> v2.y); - } - - template 
- GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.y >> v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - ~v.x, - ~v.y); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return - detail::compute_equal::is_iec559>::call(v1.x, v2.x) && - detail::compute_equal::is_iec559>::call(v1.y, v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return !(v1 == v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) - { - return vec<2, bool, Q>(v1.x && v2.x, v1.y && v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) - { - return vec<2, bool, Q>(v1.x || v2.x, v1.y || v2.y); - } -}//namespace glm diff --git a/third_party/glm/detail/type_vec3.hpp b/third_party/glm/detail/type_vec3.hpp deleted file mode 100755 index d83cde6..0000000 --- a/third_party/glm/detail/type_vec3.hpp +++ /dev/null @@ -1,432 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec3.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<3, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<3, T, Q> type; - typedef vec<3, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE -# pragma warning(disable: 4324) // structure was padded due to alignment specifier -# endif -# endif -# endif - -# if GLM_CONFIG_XYZW_ONLY - T x, y, z; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - struct{ T x, y, z; }; - struct{ T r, g, b; }; - struct{ T s, t, p; }; - - typename detail::storage<3, T, detail::is_aligned::value>::type data; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - GLM_SWIZZLE3_2_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_2_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_2_MEMBERS(T, Q, s, t, p) - GLM_SWIZZLE3_3_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_3_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_3_MEMBERS(T, Q, s, t, p) - GLM_SWIZZLE3_4_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_4_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_4_MEMBERS(T, Q, s, t, p) -# endif - }; -# else - union { T x, r, s; }; - union { T y, g, t; }; - union { T z, b, p; }; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) -# endif//GLM_CONFIG_SWIZZLE -# endif//GLM_LANG - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif 
[432 deleted lines elided, recoverable upstream from blob d83cde6: the vec<3, T, Q> specialization — x/y/z, r/g/b, s/t/p data members with optional swizzle support, component accessors, the implicit/explicit/conversion/swizzle constructors, compound-assignment, increment/decrement and bit-assignment operators, and declarations of the free unary, binary, bit and boolean operators — ending with }//namespace glm and the #include "type_vec3.inl" guard]
diff --git a/third_party/glm/detail/type_vec3.inl b/third_party/glm/detail/type_vec3.inl
deleted file mode 100755
index 6532c9e..0000000
--- a/third_party/glm/detail/type_vec3.inl
+++ /dev/null
@@ -1,1068 +0,0 @@
[1068 deleted lines elided, recoverable upstream from blob 6532c9e: the definitions matching type_vec3.hpp — constructors, component accessors with bounds asserts, compound-assignment and increment/decrement operators, and the free component-wise arithmetic, bit and boolean operators of vec<3, T, Q> — ending with }//namespace glm]
diff --git a/third_party/glm/detail/type_vec4.hpp b/third_party/glm/detail/type_vec4.hpp
deleted file mode 100755
index 4a36434..0000000
--- a/third_party/glm/detail/type_vec4.hpp
+++ /dev/null
@@ -1,505 +0,0 @@
GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - typedef length_t length_type; - - /// Return the count of components of the vector - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} - - GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, Q> const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y, T z, T w); - - // -- Conversion scalar constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _Y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, 
B, P> const& _z, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, B _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, B _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<3, B, P> const& _yzw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - // -- Swizzle constructors -- -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<4, T, Q, E0, E1, E2, E3> const& that) - { - *this = that(); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, detail::_swizzle<2, T, Q, F0, F1, -1, -2> const& u) - { - *this = vec<4, T, Q>(v(), u()); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, T const& y, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) - { - *this = vec<4, T, Q>(x, y, v()); - } - - template - 
GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& w) - { - *this = vec<4, T, Q>(x, v(), w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& z, T const& w) - { - *this = vec<4, T, Q>(v(), z, w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v, T const& w) - { - *this = vec<4, T, Q>(v(), w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v) - { - *this = vec<4, T, Q>(x, v()); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, T, Q> const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<4, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<4, U, Q> const& v); - 
template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<4, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(T 
scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec4.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec4.inl b/third_party/glm/detail/type_vec4.inl deleted file mode 100755 index 3c212d9..0000000 --- a/third_party/glm/detail/type_vec4.inl +++ /dev/null @@ -1,1140 +0,0 @@ -/// @ref core - -#include "compute_vector_relational.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_vec4_add - { - GLM_FUNC_QUALIFIER 
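[editor's note: from here the deleted type_vec4.inl implements every operator by delegating to a detail::compute_vec4_* functor selected on alignment (and, for bit operators, component size). A simplified standalone sketch of that dispatch pattern, with illustrative names rather than GLM's:

    struct float4 { float x, y, z, w; };

    template<bool Aligned>
    struct compute_add // generic path: plain componentwise arithmetic
    {
        static float4 call(const float4& a, const float4& b)
        {
            return { a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w };
        }
    };
    // A SIMD backend specializes compute_add<true> with intrinsics, which is
    // exactly the role type_vec4_simd.inl plays for GLM's aligned qualifiers.

    float4 operator+(const float4& a, const float4& b)
    {
        return compute_add<false>::call(a, b); // the operator only forwards
    }
]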
GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); - } - }; - - template - struct compute_vec4_sub - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); - } - }; - - template - struct compute_vec4_mul - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); - } - }; - - template - struct compute_vec4_div - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); - } - }; - - template - struct compute_vec4_mod - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x % b.x, a.y % b.y, a.z % b.z, a.w % b.w); - } - }; - - template - struct compute_vec4_and - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w); - } - }; - - template - struct compute_vec4_or - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w); - } - }; - - template - struct compute_vec4_xor - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); - } - }; - - template - struct compute_vec4_shift_left - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x << b.x, a.y << b.y, a.z << b.z, a.w << b.w); - } - }; - - template - struct compute_vec4_shift_right - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x >> b.x, a.y >> b.y, a.z >> b.z, a.w >> b.w); - } - }; - - template - struct compute_vec4_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return - detail::compute_equal::is_iec559>::call(v1.x, v2.x) && - detail::compute_equal::is_iec559>::call(v1.y, v2.y) && - detail::compute_equal::is_iec559>::call(v1.z, v2.z) && - detail::compute_equal::is_iec559>::call(v1.w, v2.w); - } - }; - - template - struct compute_vec4_nequal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return !compute_vec4_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - }; - - template - struct compute_vec4_bitwise_not - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - return vec<4, T, Q>(~v.x, ~v.y, ~v.z, ~v.w); - } - }; -}//namespace detail - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0), y(0), z(0), w(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, Q> const& v) - : x(v.x), y(v.y), z(v.z), w(v.w) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER 
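[editor's note: compute_vec4_equal routes through detail::compute_equal keyed on std::numeric_limits<T>::is_iec559, but operator== remains an exact componentwise comparison. For floating-point vectors a tolerance is usually what you want; a small sketch using GLM's own epsilon helpers:

    #include <glm/glm.hpp>
    #include <glm/gtc/epsilon.hpp>

    bool nearly_equal(const glm::vec4& a, const glm::vec4& b)
    {
        // epsilonEqual returns a bvec4; all() collapses it to a single bool.
        return glm::all(glm::epsilonEqual(a, b, 1e-6f));
    }
]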
GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, P> const& v) - : x(v.x), y(v.y), z(v.z), w(v.w) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T scalar) - : x(scalar), y(scalar), z(scalar), w(scalar) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T _x, T _y, T _z, T _w) - : x(_x), y(_y), z(_z), w(_w) - {} - - // -- Conversion scalar constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.x)) - , z(static_cast(v.x)) - , w(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, Z _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> 
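[editor's note: each conversion constructor above casts its arguments to the destination component type; the casts' template arguments were lost in this diff's rendering, but the pattern is static_cast<T>(...). In practice that allows explicit mixed-type construction:

    #include <glm/glm.hpp>

    glm::vec4  mixed(1, 2.0, 3.0f, 4u);           // int, double, float, unsigned -> float
    glm::ivec4 truncated(1.9f, 2.9f, 3.9f, 4.9f); // static_cast<int> truncates toward zero
]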
const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, C _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, C _w) - : x(static_cast(_x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w) - : x(static_cast(_x.x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) - : x(static_cast(_x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, B _y, vec<2, C, P> const& _zw) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_zw.x)) - , 
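[editor's note: the vector-composition constructors cover every way to assemble a vec4 from smaller vectors and scalars. For reference:

    #include <glm/glm.hpp>

    glm::vec2 xy(1.0f, 2.0f);
    glm::vec3 rgb(0.1f, 0.2f, 0.3f);

    glm::vec4 a(xy, 3.0f, 4.0f); // (vec2, z, w)
    glm::vec4 b(1.0f, xy, 4.0f); // (x, vec2, w)
    glm::vec4 c(xy, xy);         // (vec2, vec2)
    glm::vec4 d(rgb, 1.0f);      // (vec3, w): the common colour/point case
]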
w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, B _w) - : x(static_cast(_xyz.x)) - , y(static_cast(_xyz.y)) - , z(static_cast(_xyz.z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w) - : x(static_cast(_xyz.x)) - , y(static_cast(_xyz.y)) - , z(static_cast(_xyz.z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<3, B, P> const& _yzw) - : x(static_cast(_x)) - , y(static_cast(_yzw.x)) - , z(static_cast(_yzw.y)) - , w(static_cast(_yzw.z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw) - : x(static_cast(_x.x)) - , y(static_cast(_yzw.x)) - , z(static_cast(_yzw.y)) - , w(static_cast(_yzw.z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - , z(static_cast(v.z)) - , w(static_cast(v.w)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - case 3: - return w; - } - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) const - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - case 3: - return w; - } - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, T, Q> const& v) - { - this->x = v.x; - this->y = v.y; - this->z = v.z; - this->w = v.w; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, U, Q> const& v) - { - this->x = static_cast(v.x); - this->y = static_cast(v.y); - this->z = static_cast(v.z); - this->w = static_cast(v.w); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(U scalar) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(U scalar) - { - return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<1, U, Q> const& v) - { - return (*this = 
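[editor's note: component access goes through a switch whose default falls back to x after an assert, so out-of-range indices are caught only in debug builds. Iterating with the declared length_type keeps indices in range:

    #include <glm/glm.hpp>

    float sum_components(const glm::vec4& v)
    {
        float s = 0.0f;
        for (glm::vec4::length_type i = 0; i < v.length(); ++i) // length() is 4
            s += v[i];
        return s;
    }
]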
detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(U scalar) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(U scalar) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v))); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator++() - { - ++this->x; - ++this->y; - ++this->z; - ++this->w; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator--() - { - --this->x; - --this->y; - --this->z; - --this->w; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator++(int) - { - vec<4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator--(int) - { - vec<4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(U scalar) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(U scalar) - { - return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<4, U, Q> const& v) - { - return (*this = 
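[editor's note: the compound-assignment operators are templated on the scalar type U and rebroadcast it through vec<4, T, Q>(scalar), so mixed-type updates are accepted and converted first:

    #include <glm/glm.hpp>

    void scale_in_place(glm::vec4& v)
    {
        v *= 2;   // int scalar: broadcast to a vec4 and converted to float
        v -= 0.5; // double scalar: same mechanism
        ++v;      // increments every component
    }
]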
detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(U scalar) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(U scalar) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(U scalar) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(U scalar) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - // -- Unary constant operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v) - { - return vec<4, T, Q>(0) -= v; - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER 
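[editor's note: unary minus above is written as vec<4, T, Q>(0) -= v rather than negating components directly, so negation reuses the (possibly SIMD-specialized) subtraction path:

    #include <glm/glm.hpp>

    glm::vec4 flip(const glm::vec4& v)
    {
        return -v; // evaluated as vec4(0.0f) - v
    }
]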
GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) += scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) += v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(v) += scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v2) += v1; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) += v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) -= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) -= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) *= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) *= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(v) *= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v2) *= v1; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) *= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) /= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) /= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) /= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) /= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) /= v2; - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) %= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) %= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v) 
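[editor's note: for the non-commutative operators the scalar-first overloads matter: scalar - v and scalar / v broadcast the scalar and then apply the componentwise operation, which is not the same as v - scalar or v / scalar:

    #include <glm/glm.hpp>

    glm::vec4 v(1.0f, 2.0f, 4.0f, 8.0f);
    glm::vec4 a = v - 1.0f; // (0, 1, 3, 7)
    glm::vec4 b = 1.0f - v; // (0, -1, -3, -7): built as vec4(1.0f) -= v
    glm::vec4 c = 8.0f / v; // (8, 4, 2, 1): componentwise reciprocal scaling
]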
- { - return vec<4, T, Q>(scalar) %= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar.x) %= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) %= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) &= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<4, T, Q>(v) &= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) &= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) &= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) &= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) |= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) |= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) |= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) |= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) |= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) ^= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) ^= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) ^= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) ^= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) ^= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) <<= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) <<= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) <<= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) <<= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) <<= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
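[editor's note: the bit operators follow the same overload shape and are only meaningful for integer component types; a quick sketch:

    #include <glm/glm.hpp>

    glm::ivec4 m(0x0F, 0xF0, 0xFF, 0x100);
    glm::ivec4 low = m & 0xFF;               // componentwise AND with a scalar
    glm::ivec4 dbl = m << 1;                 // componentwise shift
    glm::ivec4 mix = (m ^ low) | glm::ivec4(1);
]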
vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) >>= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) >>= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) >>= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) >>= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) >>= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v) - { - return detail::compute_vec4_bitwise_not::value, sizeof(T) * 8, detail::is_aligned::value>::call(v); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return detail::compute_vec4_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return detail::compute_vec4_nequal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) - { - return vec<4, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z, v1.w && v2.w); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) - { - return vec<4, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z, v1.w || v2.w); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "type_vec4_simd.inl" -#endif diff --git a/third_party/glm/detail/type_vec4_simd.inl b/third_party/glm/detail/type_vec4_simd.inl deleted file mode 100755 index 29559b5..0000000 --- a/third_party/glm/detail/type_vec4_simd.inl +++ /dev/null @@ -1,775 +0,0 @@ -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - struct _swizzle_base1<4, float, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const - { - __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer); - - vec<4, float, Q> Result; -# if GLM_ARCH & GLM_ARCH_AVX_BIT - Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0)); -# else - Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0)); -# endif - return Result; - } - }; - - template - struct _swizzle_base1<4, int, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const - { - __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); - - vec<4, int, Q> Result; - Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); - return Result; - } - }; - - template - struct _swizzle_base1<4, uint, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const - { - __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); - - vec<4, uint, Q> Result; - Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); - return Result; - } - }; -# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - template - struct 
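[editor's note: the SSE swizzle specializations in the type_vec4_simd.inl deletion starting here compile a swizzle like .wzyx down to a single shuffle. A standalone illustration of the _MM_SHUFFLE convention they rely on (indices are given highest lane first):

    #include <xmmintrin.h> // SSE

    // (x, y, z, w) -> (w, z, y, x); _MM_SHUFFLE(3, 2, 1, 0) would be the identity.
    __m128 reverse(__m128 v)
    {
        return _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3));
    }
]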
compute_vec4_add - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_add_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_add - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_add_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_sub - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_sub_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_sub - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_sub_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_mul - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_mul_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_mul - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_mul_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_div - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_div_ps(a.data, b.data); - return Result; - } - }; - - # if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_div - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_div_pd(a.data, b.data); - return Result; - } - }; -# endif - - template<> - struct compute_vec4_div - { - static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b) - { - vec<4, float, aligned_lowp> Result; - Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data)); - return Result; - } - }; - - template - struct compute_vec4_and - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_and_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_and - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_and_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_or - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_or_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_or - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_or_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_xor - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_xor_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_xor - { - static 
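[editor's note: the aligned_lowp division specialization trades accuracy for speed: _mm_rcp_ps yields only a roughly 12-bit reciprocal approximation. Side by side:

    #include <xmmintrin.h>

    __m128 div_exact(__m128 a, __m128 b) { return _mm_div_ps(a, b); }
    __m128 div_fast (__m128 a, __m128 b) { return _mm_mul_ps(a, _mm_rcp_ps(b)); } // lowp shortcut
]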
vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_xor_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_shift_left - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_sll_epi32(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_shift_left - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_sll_epi64(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_shift_right - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_srl_epi32(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_shift_right - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_srl_epi64(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_bitwise_not - { - static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - vec<4, T, Q> Result; - Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_bitwise_not - { - static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - vec<4, T, Q> Result; - Result.data = _mm256_xor_si256(v.data, _mm_set1_epi32(-1)); - return Result; - } - }; -# endif - - template - struct compute_vec4_equal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return _mm_movemask_ps(_mm_cmpeq_ps(v1.data, v2.data)) != 0; - } - }; - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - template - struct compute_vec4_equal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0; - __m128i neq = _mm_xor_si128(v1.data, v2.data); - return _mm_test_all_zeros(neq, neq) == 0; - } - }; -# endif - - template - struct compute_vec4_nequal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0; - } - }; - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - template - struct compute_vec4_nequal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0; - __m128i neq = _mm_xor_si128(v1.data, v2.data); - return _mm_test_all_zeros(neq, neq) != 0; - } - }; -# endif -}//namespace detail - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_lowp>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_mediump>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} -# endif - - template<> - 
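[editor's note: one oddity in the AVX2 bitwise_not being deleted here: it xors a __m256i against _mm_set1_epi32(-1), a 128-bit constant, where a 256-bit all-ones constant appears intended. A type-consistent version would be:

    #include <immintrin.h>

    __m256i bitwise_not_256(__m256i v)
    {
        return _mm256_xor_si256(v, _mm256_set1_epi32(-1)); // 256-bit all-ones mask
    }
]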
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_lowp>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_mediump>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_highp>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} -# endif - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - -#if GLM_ARCH & GLM_ARCH_NEON_BIT -namespace glm { -namespace detail { - - template - struct compute_vec4_add - { - static - vec<4, float, Q> - call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vaddq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_add - { - static - vec<4, uint, Q> - call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vaddq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_add - { - static - vec<4, int, Q> - call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vaddq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vsubq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, uint, Q> 
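[editor's note: in the NEON block that begins here, the int specialization of compute_vec4_add declares its Result as vec<4, uint, Q>, which looks like a copy-paste slip from the uint variant just above it; the operation itself is simply:

    #include <arm_neon.h>

    int32x4_t add_s32(int32x4_t a, int32x4_t b)
    {
        return vaddq_s32(a, b); // signed 32-bit lanewise add
    }
]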
call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vsubq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, int, Q> Result; - Result.data = vsubq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vmulq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vmulq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, int, Q> Result; - Result.data = vmulq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_div - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vdivq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - uint32x4_t cmp = vceqq_f32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - uint32x4_t cmp = vceqq_u32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - uint32x4_t cmp = vceqq_s32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - -}//namespace detail - -#if !GLM_CONFIG_XYZW_ONLY - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, 
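[editor's note: the NEON equality helpers reduce a lanewise compare to a single bool with pairwise minimums: the result is all-ones only if every lane matched. The deleted code calls vget_low_f32/vget_high_f32 on a uint32x4_t; the u32 variants below are the type-correct spelling. A standalone version:

    #include <arm_neon.h>

    bool all_equal_f32(float32x4_t a, float32x4_t b)
    {
        uint32x4_t cmp = vceqq_f32(a, b);                              // ~0u per equal lane
        uint32x2_t m = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
        m = vpmin_u32(m, m);                                           // min over all four lanes
        return vget_lane_u32(m, 0) == ~0u;
    }
]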
aligned_highp>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_lowp>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_mediump>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_highp>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, aligned_highp>& rhs) : - data(rhs.data) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, int, aligned_highp>& rhs) : - data(vcvtq_f32_s32(rhs.data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, uint, aligned_highp>& rhs) : - data(vcvtq_f32_u32(rhs.data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_lowp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_mediump>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_highp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_lowp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_mediump>(_x, _y, _z, _w).data)) - {} - - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_highp>(_x, _y, _z, _w).data)) - {} - -#endif -}//namespace glm - -#endif diff --git a/third_party/glm/exponential.hpp b/third_party/glm/exponential.hpp deleted file mode 100755 index f8fb886..0000000 --- a/third_party/glm/exponential.hpp +++ /dev/null @@ -1,110 +0,0 @@ -/// @ref core -/// @file glm/exponential.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions -/// -/// @defgroup core_func_exponential Exponential functions -/// @ingroup core -/// -/// Provides GLSL exponential functions -/// -/// These all operate component-wise. The description is per component. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/type_vec1.hpp" -#include "detail/type_vec2.hpp" -#include "detail/type_vec3.hpp" -#include "detail/type_vec4.hpp" -#include - -namespace glm -{ - /// @addtogroup core_func_exponential - /// @{ - - /// Returns 'base' raised to the power 'exponent'. - /// - /// @param base Floating point value. 
pow function is defined for input values of 'base' defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @param exponent Floating point value representing the 'exponent'. - /// - /// @see GLSL pow man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec pow(vec const& base, vec const& exponent); - - /// Returns the natural exponentiation of x, i.e., e^x. - /// - /// @param v exp function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL exp man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec exp(vec const& v); - - /// Returns the natural logarithm of v, i.e., - /// returns the value y which satisfies the equation x = e^y. - /// Results are undefined if v <= 0. - /// - /// @param v log function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL log man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec log(vec const& v); - - /// Returns 2 raised to the v power. - /// - /// @param v exp2 function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL exp2 man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec exp2(vec const& v); - - /// Returns the base 2 log of x, i.e., returns the value y, - /// which satisfies the equation x = 2 ^ y. - /// - /// @param v log2 function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL log2 man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec log2(vec const& v); - - /// Returns the positive square root of v. - /// - /// @param v sqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL sqrt man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec sqrt(vec const& v); - - /// Returns the reciprocal of the positive square root of v. - /// - /// @param v inversesqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. 
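[editor's note: the deleted exponential.hpp declares the GLSL section 8.2 set (pow, exp, log, exp2, log2, sqrt, inversesqrt), all componentwise over vec<L, T, Q>. A quick usage sketch:

    #include <glm/glm.hpp>

    glm::vec4 demo(const glm::vec4& v)
    {
        glm::vec4 squared = glm::pow(v, glm::vec4(2.0f));                     // componentwise v^2
        glm::vec4 rsqrt   = glm::inversesqrt(glm::max(v, glm::vec4(1e-6f))); // clamp: defined on [0, inf)
        return squared * rsqrt + glm::log(glm::exp(v));                      // log(exp(v)) == v
    }
]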
- /// - /// @see GLSL inversesqrt man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec inversesqrt(vec const& v); - - /// @} -}//namespace glm - -#include "detail/func_exponential.inl" diff --git a/third_party/glm/ext.hpp b/third_party/glm/ext.hpp deleted file mode 100755 index 3bc8db2..0000000 --- a/third_party/glm/ext.hpp +++ /dev/null @@ -1,196 +0,0 @@ -/// @file glm/ext.hpp -/// -/// @ref core (Dependence) - -#include "detail/setup.hpp" - -#pragma once - -#include "glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED) -# define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED -# pragma message("GLM: All extensions included (not recommended)") -#endif//GLM_MESSAGES - -#include "./ext/matrix_double2x2.hpp" -#include "./ext/matrix_double2x2_precision.hpp" -#include "./ext/matrix_double2x3.hpp" -#include "./ext/matrix_double2x3_precision.hpp" -#include "./ext/matrix_double2x4.hpp" -#include "./ext/matrix_double2x4_precision.hpp" -#include "./ext/matrix_double3x2.hpp" -#include "./ext/matrix_double3x2_precision.hpp" -#include "./ext/matrix_double3x3.hpp" -#include "./ext/matrix_double3x3_precision.hpp" -#include "./ext/matrix_double3x4.hpp" -#include "./ext/matrix_double3x4_precision.hpp" -#include "./ext/matrix_double4x2.hpp" -#include "./ext/matrix_double4x2_precision.hpp" -#include "./ext/matrix_double4x3.hpp" -#include "./ext/matrix_double4x3_precision.hpp" -#include "./ext/matrix_double4x4.hpp" -#include "./ext/matrix_double4x4_precision.hpp" - -#include "./ext/matrix_float2x2.hpp" -#include "./ext/matrix_float2x2_precision.hpp" -#include "./ext/matrix_float2x3.hpp" -#include "./ext/matrix_float2x3_precision.hpp" -#include "./ext/matrix_float2x4.hpp" -#include "./ext/matrix_float2x4_precision.hpp" -#include "./ext/matrix_float3x2.hpp" -#include "./ext/matrix_float3x2_precision.hpp" -#include "./ext/matrix_float3x3.hpp" -#include "./ext/matrix_float3x3_precision.hpp" -#include "./ext/matrix_float3x4.hpp" -#include "./ext/matrix_float3x4_precision.hpp" -#include "./ext/matrix_float4x2.hpp" -#include "./ext/matrix_float4x2_precision.hpp" -#include "./ext/matrix_float4x3.hpp" -#include "./ext/matrix_float4x3_precision.hpp" -#include "./ext/matrix_float4x4.hpp" -#include "./ext/matrix_float4x4_precision.hpp" - -#include "./ext/matrix_relational.hpp" - -#include "./ext/quaternion_double.hpp" -#include "./ext/quaternion_double_precision.hpp" -#include "./ext/quaternion_float.hpp" -#include "./ext/quaternion_float_precision.hpp" -#include "./ext/quaternion_geometric.hpp" -#include "./ext/quaternion_relational.hpp" - -#include "./ext/scalar_constants.hpp" -#include "./ext/scalar_int_sized.hpp" -#include "./ext/scalar_relational.hpp" - -#include "./ext/vector_bool1.hpp" -#include "./ext/vector_bool1_precision.hpp" -#include "./ext/vector_bool2.hpp" -#include "./ext/vector_bool2_precision.hpp" -#include "./ext/vector_bool3.hpp" -#include "./ext/vector_bool3_precision.hpp" -#include "./ext/vector_bool4.hpp" -#include "./ext/vector_bool4_precision.hpp" - -#include "./ext/vector_double1.hpp" -#include "./ext/vector_double1_precision.hpp" -#include "./ext/vector_double2.hpp" -#include "./ext/vector_double2_precision.hpp" -#include "./ext/vector_double3.hpp" -#include "./ext/vector_double3_precision.hpp" -#include "./ext/vector_double4.hpp" -#include "./ext/vector_double4_precision.hpp" - -#include "./ext/vector_float1.hpp" -#include "./ext/vector_float1_precision.hpp" -#include "./ext/vector_float2.hpp" -#include 
"./ext/vector_float2_precision.hpp" -#include "./ext/vector_float3.hpp" -#include "./ext/vector_float3_precision.hpp" -#include "./ext/vector_float4.hpp" -#include "./ext/vector_float4_precision.hpp" - -#include "./ext/vector_int1.hpp" -#include "./ext/vector_int1_precision.hpp" -#include "./ext/vector_int2.hpp" -#include "./ext/vector_int2_precision.hpp" -#include "./ext/vector_int3.hpp" -#include "./ext/vector_int3_precision.hpp" -#include "./ext/vector_int4.hpp" -#include "./ext/vector_int4_precision.hpp" - -#include "./ext/vector_relational.hpp" - -#include "./ext/vector_uint1.hpp" -#include "./ext/vector_uint1_precision.hpp" -#include "./ext/vector_uint2.hpp" -#include "./ext/vector_uint2_precision.hpp" -#include "./ext/vector_uint3.hpp" -#include "./ext/vector_uint3_precision.hpp" -#include "./ext/vector_uint4.hpp" -#include "./ext/vector_uint4_precision.hpp" - -#include "./gtc/bitfield.hpp" -#include "./gtc/color_space.hpp" -#include "./gtc/constants.hpp" -#include "./gtc/epsilon.hpp" -#include "./gtc/integer.hpp" -#include "./gtc/matrix_access.hpp" -#include "./gtc/matrix_integer.hpp" -#include "./gtc/matrix_inverse.hpp" -#include "./gtc/matrix_transform.hpp" -#include "./gtc/noise.hpp" -#include "./gtc/packing.hpp" -#include "./gtc/quaternion.hpp" -#include "./gtc/random.hpp" -#include "./gtc/reciprocal.hpp" -#include "./gtc/round.hpp" -#include "./gtc/type_precision.hpp" -#include "./gtc/type_ptr.hpp" -#include "./gtc/ulp.hpp" -#include "./gtc/vec1.hpp" -#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE -# include "./gtc/type_aligned.hpp" -#endif - -#ifdef GLM_ENABLE_EXPERIMENTAL -#include "./gtx/associated_min_max.hpp" -#include "./gtx/bit.hpp" -#include "./gtx/closest_point.hpp" -#include "./gtx/color_encoding.hpp" -#include "./gtx/color_space.hpp" -#include "./gtx/color_space_YCoCg.hpp" -#include "./gtx/compatibility.hpp" -#include "./gtx/component_wise.hpp" -#include "./gtx/dual_quaternion.hpp" -#include "./gtx/euler_angles.hpp" -#include "./gtx/extend.hpp" -#include "./gtx/extended_min_max.hpp" -#include "./gtx/fast_exponential.hpp" -#include "./gtx/fast_square_root.hpp" -#include "./gtx/fast_trigonometry.hpp" -#include "./gtx/functions.hpp" -#include "./gtx/gradient_paint.hpp" -#include "./gtx/handed_coordinate_space.hpp" -#include "./gtx/integer.hpp" -#include "./gtx/intersect.hpp" -#include "./gtx/log_base.hpp" -#include "./gtx/matrix_cross_product.hpp" -#include "./gtx/matrix_interpolation.hpp" -#include "./gtx/matrix_major_storage.hpp" -#include "./gtx/matrix_operation.hpp" -#include "./gtx/matrix_query.hpp" -#include "./gtx/mixed_product.hpp" -#include "./gtx/norm.hpp" -#include "./gtx/normal.hpp" -#include "./gtx/normalize_dot.hpp" -#include "./gtx/number_precision.hpp" -#include "./gtx/optimum_pow.hpp" -#include "./gtx/orthonormalize.hpp" -#include "./gtx/perpendicular.hpp" -#include "./gtx/polar_coordinates.hpp" -#include "./gtx/projection.hpp" -#include "./gtx/quaternion.hpp" -#include "./gtx/raw_data.hpp" -#include "./gtx/rotate_vector.hpp" -#include "./gtx/spline.hpp" -#include "./gtx/std_based_type.hpp" -#if !(GLM_COMPILER & GLM_COMPILER_CUDA) -# include "./gtx/string_cast.hpp" -#endif -#include "./gtx/transform.hpp" -#include "./gtx/transform2.hpp" -#include "./gtx/vec_swizzle.hpp" -#include "./gtx/vector_angle.hpp" -#include "./gtx/vector_query.hpp" -#include "./gtx/wrap.hpp" - -#if GLM_HAS_TEMPLATE_ALIASES -# include "./gtx/scalar_multiplication.hpp" -#endif - -#if GLM_HAS_RANGE_FOR -# include "./gtx/range.hpp" -#endif -#endif//GLM_ENABLE_EXPERIMENTAL diff --git 
diff --git a/third_party/glm/ext/matrix_clip_space.hpp b/third_party/glm/ext/matrix_clip_space.hpp
deleted file mode 100755
index c3874f2..0000000
--- a/third_party/glm/ext/matrix_clip_space.hpp
+++ /dev/null
@@ -1,522 +0,0 @@
-/// @ref ext_matrix_clip_space
-/// @file glm/ext/matrix_clip_space.hpp
-///
-/// @defgroup ext_matrix_clip_space GLM_EXT_matrix_clip_space
-/// @ingroup ext
-///
-/// Defines functions that generate clip space transformation matrices.
-///
-/// The matrices generated by this extension use standard OpenGL fixed-function
-/// conventions. For example, the lookAt function generates a transform from world
-/// space into the specific eye space that the projective matrix functions
-/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility
-/// specification defines the particular layout of this eye space.
-///
-/// Include <glm/ext/matrix_clip_space.hpp> to use the features of this extension.
-///
-/// @see ext_matrix_transform
-/// @see ext_matrix_projection
-
-#pragma once
-
-// Dependencies
-#include "../ext/scalar_constants.hpp"
-#include "../geometric.hpp"
-#include "../trigonometric.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_matrix_clip_space extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_matrix_clip_space
-	/// @{
-
-	/// Creates a matrix for projecting two-dimensional coordinates onto the screen.
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top, T const& zNear, T const& zFar)
-	/// @see gluOrtho2D man page
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho(
-		T left, T right, T bottom, T top);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_ZO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_NO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_ZO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_NO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoZO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoNO(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a matrix for an orthographic parallel viewing volume, using the default handedness and default near and far clip planes definition.
-	/// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
-	///
-	/// @tparam T A floating-point scalar type
-	///
-	/// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top)
-	/// @see glOrtho man page
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho(
-		T left, T right, T bottom, T top, T zNear, T zFar);
-
-	/// Creates a left handed frustum matrix.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_ZO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a left handed frustum matrix.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_NO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a right handed frustum matrix.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_ZO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a right handed frustum matrix.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_NO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumZO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumNO(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a left handed frustum matrix.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a right handed frustum matrix.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH(
-		T left, T right, T bottom, T top, T near, T far);
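The ortho family above maps an axis-aligned box onto the clip volume, which is what a 2D renderer typically wants. A plausible helper for pixel-space rendering, assuming a top-left origin with y pointing down; the helper name and convention are assumptions, not part of this patch:

#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

// Note the swapped bottom/top arguments (bottom = height, top = 0),
// which flips y so that pixel row 0 is the top of the screen.
glm::mat4 PixelSpaceProjection(float width, float height)
{
    return glm::ortho(0.0f, width, height, 0.0f);
}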
-
-	/// Creates a frustum matrix using the default handedness and default near and far clip planes definition.
-	/// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
-	///
-	/// @tparam T A floating-point scalar type
-	/// @see glFrustum man page
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> frustum(
-		T left, T right, T bottom, T top, T near, T far);
-
-	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_ZO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_NO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_ZO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_NO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveZO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveNO(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a right handed, symmetric perspective-view frustum.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a left handed, symmetric perspective-view frustum.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH(
-		T fovy, T aspect, T near, T far);
-
-	/// Creates a matrix for a symmetric perspective-view frustum based on the default handedness and default near and far clip planes definition.
-	/// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	/// @see gluPerspective man page
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspective(
-		T fovy, T aspect, T near, T far);
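A detail worth underlining from the perspective() documentation above: fovy is expressed in radians. A minimal usage sketch (the field of view and aspect values are illustrative):

#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

// glm::radians converts degrees to radians; a bare 45.0f passed as fovy
// would be interpreted as roughly 2578 degrees of vertical field of view.
glm::mat4 proj = glm::perspective(glm::radians(45.0f), 1920.0f / 1080.0f, 0.1f, 100.0f);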
-
-	/// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_ZO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_NO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_ZO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_NO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovZO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
-	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovNO(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a right handed perspective projection matrix based on a field of view.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a left handed perspective projection matrix based on a field of view.
-	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
-	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH(
-		T fov, T width, T height, T near, T far);
-
-	/// Builds a perspective projection matrix based on a field of view and the default handedness and default near and far clip planes definition.
-	/// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
-	///
-	/// @param fov Expressed in radians.
-	/// @param width Width of the viewport
-	/// @param height Height of the viewport
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFov(
-		T fov, T width, T height, T near, T far);
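All the _ZO/_NO and _LH/_RH variants above are resolved through two configuration macros. For a Vulkan renderer such as this project, a plausible consumer-side setup would be the following; this is an assumption about usage, not something this patch configures:

// Vulkan consumes [0, 1] clip-space depth, so ask GLM for the *_ZO variants.
// The macro must be defined before any GLM header is included.
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

glm::mat4 MakeProjection(float aspect)
{
    // With the macro above, glm::perspective dispatches to perspectiveRH_ZO.
    glm::mat4 proj = glm::perspective(glm::radians(90.0f), aspect, 0.1f, 100.0f);
    proj[1][1] *= -1.0f; // Vulkan's framebuffer y axis points down, unlike OpenGL's.
    return proj;
}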
-
-	/// Creates a matrix for a left handed, symmetric perspective-view frustum with far plane at infinity.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveLH(
-		T fovy, T aspect, T near);
-
-	/// Creates a matrix for a right handed, symmetric perspective-view frustum with far plane at infinity.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveRH(
-		T fovy, T aspect, T near);
-
-	/// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity with default handedness.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspective(
-		T fovy, T aspect, T near);
-
-	/// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity for graphics hardware that doesn't support depth clamping.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
-		T fovy, T aspect, T near);
-
-	/// Creates a matrix for a symmetric perspective-view frustum with far plane at infinity for graphics hardware that doesn't support depth clamping.
-	///
-	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
-	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
-	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
-	/// @param ep Epsilon
-	///
-	/// @tparam T A floating-point scalar type
-	template<typename T>
-	GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
-		T fovy, T aspect, T near, T ep);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_clip_space.inl"
diff --git a/third_party/glm/ext/matrix_clip_space.inl b/third_party/glm/ext/matrix_clip_space.inl
deleted file mode 100755
index 7e4df33..0000000
--- a/third_party/glm/ext/matrix_clip_space.inl
+++ /dev/null
@@ -1,555 +0,0 @@
-namespace glm
-{
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top)
-	{
-		mat<4, 4, T, defaultp> Result(static_cast<T>(1));
-		Result[0][0] = static_cast<T>(2) / (right - left);
-		Result[1][1] = static_cast<T>(2) / (top - bottom);
-		Result[2][2] = - static_cast<T>(1);
-		Result[3][0] = - (right + left) / (right - left);
-		Result[3][1] = - (top + bottom) / (top - bottom);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-		mat<4, 4, T, defaultp> Result(1);
-		Result[0][0] = static_cast<T>(2) / (right - left);
-		Result[1][1] = static_cast<T>(2) / (top - bottom);
-		Result[2][2] = static_cast<T>(1) / (zFar - zNear);
-		Result[3][0] = - (right + left) / (right - left);
-		Result[3][1] = - (top + bottom) / (top - bottom);
-		Result[3][2] = - zNear / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-		mat<4, 4, T, defaultp> Result(1);
-		Result[0][0] = static_cast<T>(2) / (right - left);
-		Result[1][1] = static_cast<T>(2) / (top - bottom);
-		Result[2][2] = static_cast<T>(2) / (zFar - zNear);
-		Result[3][0] = - (right + left) / (right - left);
-		Result[3][1] = - (top + bottom) / (top - bottom);
-		Result[3][2] = - (zFar + zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-		mat<4, 4, T, defaultp> Result(1);
-		Result[0][0] = static_cast<T>(2) / (right - left);
-		Result[1][1] = static_cast<T>(2) / (top - bottom);
-		Result[2][2] = - static_cast<T>(1) / (zFar - zNear);
-		Result[3][0] = - (right + left) / (right - left);
-		Result[3][1] = - (top + bottom) / (top - bottom);
-		Result[3][2] = - zNear / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-		mat<4, 4, T, defaultp> Result(1);
-		Result[0][0] = static_cast<T>(2) / (right - left);
-		Result[1][1] = static_cast<T>(2) / (top - bottom);
-		Result[2][2] = - static_cast<T>(2) / (zFar - zNear);
-		Result[3][0] = - (right + left) / (right - left);
-		Result[3][1] = - (top + bottom) / (top - bottom);
-		Result[3][2] = - (zFar + zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoZO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
-#	else
-		return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoNO(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return orthoLH_NO(left, right, bottom, top, zNear, zFar);
-#	else
-		return orthoRH_NO(left, right, bottom, top, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
-#	else
-		return orthoLH_NO(left, right, bottom, top, zNear, zFar);
-#	endif
-
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
-#	else
-		return orthoRH_NO(left, right, bottom, top, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
-		return orthoLH_ZO(left, right, bottom, top, zNear, zFar);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
-		return orthoLH_NO(left, right, bottom, top, zNear, zFar);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
-		return orthoRH_ZO(left, right, bottom, top, zNear, zFar);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
-		return orthoRH_NO(left, right, bottom, top, zNear, zFar);
-#	endif
-	}
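To make the ortho arithmetic above concrete, here is a small self-check against the orthoRH_NO formulas, using an assumed 800x600 pixel-space projection; the dimensions and tolerance are illustrative:

#include <cassert>
#include <cmath>
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

int main()
{
    // ortho(left=0, right=800, bottom=600, top=0) per the formulas above:
    // [0][0] = 2/(right-left) = 0.0025, [1][1] = 2/(top-bottom) = -1/300,
    // [3][0] = -(right+left)/(right-left) = -1, [3][1] = -(top+bottom)/(top-bottom) = +1.
    glm::mat4 P = glm::ortho(0.0f, 800.0f, 600.0f, 0.0f);
    glm::vec4 topLeft     = P * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    glm::vec4 bottomRight = P * glm::vec4(800.0f, 600.0f, 0.0f, 1.0f);
    assert(std::fabs(topLeft.x + 1.0f) < 1e-6f && std::fabs(topLeft.y - 1.0f) < 1e-6f);
    assert(std::fabs(bottomRight.x - 1.0f) < 1e-6f && std::fabs(bottomRight.y + 1.0f) < 1e-6f);
    return 0;
}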
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-		mat<4, 4, T, defaultp> Result(0);
-		Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
-		Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
-		Result[2][0] = (right + left) / (right - left);
-		Result[2][1] = (top + bottom) / (top - bottom);
-		Result[2][2] = farVal / (farVal - nearVal);
-		Result[2][3] = static_cast<T>(1);
-		Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-		mat<4, 4, T, defaultp> Result(0);
-		Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
-		Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
-		Result[2][0] = (right + left) / (right - left);
-		Result[2][1] = (top + bottom) / (top - bottom);
-		Result[2][2] = (farVal + nearVal) / (farVal - nearVal);
-		Result[2][3] = static_cast<T>(1);
-		Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-		mat<4, 4, T, defaultp> Result(0);
-		Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
-		Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
-		Result[2][0] = (right + left) / (right - left);
-		Result[2][1] = (top + bottom) / (top - bottom);
-		Result[2][2] = farVal / (nearVal - farVal);
-		Result[2][3] = static_cast<T>(-1);
-		Result[3][2] = -(farVal * nearVal) / (farVal - nearVal);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_NO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-		mat<4, 4, T, defaultp> Result(0);
-		Result[0][0] = (static_cast<T>(2) * nearVal) / (right - left);
-		Result[1][1] = (static_cast<T>(2) * nearVal) / (top - bottom);
-		Result[2][0] = (right + left) / (right - left);
-		Result[2][1] = (top + bottom) / (top - bottom);
-		Result[2][2] = - (farVal + nearVal) / (farVal - nearVal);
-		Result[2][3] = static_cast<T>(-1);
-		Result[3][2] = - (static_cast<T>(2) * farVal * nearVal) / (farVal - nearVal);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumZO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
-#	else
-		return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumNO(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
-#	else
-		return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
-#	else
-		return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
-#	else
-		return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustum(T left, T right, T bottom, T top, T nearVal, T farVal)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
-		return frustumLH_ZO(left, right, bottom, top, nearVal, farVal);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO
-		return frustumLH_NO(left, right, bottom, top, nearVal, farVal);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO
-		return frustumRH_ZO(left, right, bottom, top, nearVal, farVal);
-#	elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO
-		return frustumRH_NO(left, right, bottom, top, nearVal, farVal);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_ZO(T fovy, T aspect, T zNear, T zFar)
-	{
-		assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
-
-		T const tanHalfFovy = tan(fovy / static_cast<T>(2));
-
-		mat<4, 4, T, defaultp> Result(static_cast<T>(0));
-		Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
-		Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
-		Result[2][2] = zFar / (zNear - zFar);
-		Result[2][3] = - static_cast<T>(1);
-		Result[3][2] = -(zFar * zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_NO(T fovy, T aspect, T zNear, T zFar)
-	{
-		assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
-
-		T const tanHalfFovy = tan(fovy / static_cast<T>(2));
-
-		mat<4, 4, T, defaultp> Result(static_cast<T>(0));
-		Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
-		Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
-		Result[2][2] = - (zFar + zNear) / (zFar - zNear);
-		Result[2][3] = - static_cast<T>(1);
-		Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_ZO(T fovy, T aspect, T zNear, T zFar)
-	{
-		assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
-
-		T const tanHalfFovy = tan(fovy / static_cast<T>(2));
-
-		mat<4, 4, T, defaultp> Result(static_cast<T>(0));
-		Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
-		Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
-		Result[2][2] = zFar / (zFar - zNear);
-		Result[2][3] = static_cast<T>(1);
-		Result[3][2] = -(zFar * zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_NO(T fovy, T aspect, T zNear, T zFar)
-	{
-		assert(abs(aspect - std::numeric_limits<T>::epsilon()) > static_cast<T>(0));
-
-		T const tanHalfFovy = tan(fovy / static_cast<T>(2));
-
-		mat<4, 4, T, defaultp> Result(static_cast<T>(0));
-		Result[0][0] = static_cast<T>(1) / (aspect * tanHalfFovy);
-		Result[1][1] = static_cast<T>(1) / (tanHalfFovy);
-		Result[2][2] = (zFar + zNear) / (zFar - zNear);
-		Result[2][3] = static_cast<T>(1);
-		Result[3][2] = - (static_cast<T>(2) * zFar * zNear) / (zFar - zNear);
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveZO(T fovy, T aspect, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
-#	else
-		return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveNO(T fovy, T aspect, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-		return perspectiveLH_NO(fovy, aspect, zNear, zFar);
-#	else
-		return perspectiveRH_NO(fovy, aspect, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH(T fovy, T aspect, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
-#	else
-		return perspectiveLH_NO(fovy, aspect, zNear, zFar);
-#	endif
-
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH(T fovy, T aspect, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT
-		return perspectiveRH_ZO(fovy, aspect, zNear, zFar);
-#	else
-		return perspectiveRH_NO(fovy, aspect, zNear, zFar);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspective(T fovy, T aspect, T zNear, T zFar)
-	{
-#	if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO
-		return perspectiveLH_ZO(fovy, aspect, zNear, zFar);
-#	elif
GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return perspectiveLH_NO(fovy, aspect, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return perspectiveRH_ZO(fovy, aspect, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return perspectiveRH_NO(fovy, aspect, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_ZO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = zFar / (zNear - zFar); - Result[2][3] = - static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_NO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = - (zFar + zNear) / (zFar - zNear); - Result[2][3] = - static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_ZO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = zFar / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_NO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? 
- - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = (zFar + zNear) / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovZO(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovNO(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFov(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH(T fovy, T aspect, T zNear) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = - static_cast(1); - Result[2][3] = - static_cast(1); - Result[3][2] = - static_cast(2) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH(T fovy, T aspect, T zNear) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(T(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = static_cast(1); - Result[2][3] = static_cast(1); - Result[3][2] = - static_cast(2) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspective(T fovy, T aspect, T zNear) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return infinitePerspectiveLH(fovy, aspect, zNear); -# 
else - return infinitePerspectiveRH(fovy, aspect, zNear); -# endif - } - - // Infinite projection matrix: http://www.terathon.com/gdc07_lengyel.pdf - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear, T ep) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = ep - static_cast(1); - Result[2][3] = static_cast(-1); - Result[3][2] = (ep - static_cast(2)) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear) - { - return tweakedInfinitePerspective(fovy, aspect, zNear, epsilon()); - } -}//namespace glm diff --git a/third_party/glm/ext/matrix_common.hpp b/third_party/glm/ext/matrix_common.hpp deleted file mode 100755 index 05c3799..0000000 --- a/third_party/glm/ext/matrix_common.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_matrix_common -/// @file glm/ext/matrix_common.hpp -/// -/// @defgroup ext_matrix_common GLM_EXT_matrix_common -/// @ingroup ext -/// -/// Defines functions for common matrix operations. -/// -/// Include to use the features of this extension. -/// -/// @see ext_matrix_common - -#pragma once - -#include "../detail/qualifier.hpp" -#include "../detail/_fixes.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_common - /// @{ - - template - GLM_FUNC_DECL mat mix(mat const& x, mat const& y, mat const& a); - - template - GLM_FUNC_DECL mat mix(mat const& x, mat const& y, U a); - - /// @} -}//namespace glm - -#include "matrix_common.inl" diff --git a/third_party/glm/ext/matrix_common.inl b/third_party/glm/ext/matrix_common.inl deleted file mode 100755 index 9d50848..0000000 --- a/third_party/glm/ext/matrix_common.inl +++ /dev/null @@ -1,16 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, U a) - { - return mat(x) * (static_cast(1) - a) + mat(y) * a; - } - - template - GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, mat const& a) - { - return matrixCompMult(mat(x), static_cast(1) - a) + matrixCompMult(mat(y), a); - } -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x2.hpp b/third_party/glm/ext/matrix_double2x2.hpp deleted file mode 100755 index 94dca54..0000000 --- a/third_party/glm/ext/matrix_double2x2.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x2.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, double, defaultp> dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, double, defaultp> dmat2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x2_precision.hpp b/third_party/glm/ext/matrix_double2x2_precision.hpp deleted file mode 100755 index 9e2c174..0000000 --- a/third_party/glm/ext/matrix_double2x2_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, lowp> lowp_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, mediump> mediump_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, highp> highp_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, lowp> lowp_dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, mediump> mediump_dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, highp> highp_dmat2x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x3.hpp b/third_party/glm/ext/matrix_double2x3.hpp deleted file mode 100755 index bfef87a..0000000 --- a/third_party/glm/ext/matrix_double2x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x3.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 3 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 3, double, defaultp> dmat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x3_precision.hpp b/third_party/glm/ext/matrix_double2x3_precision.hpp deleted file mode 100755 index 098fb60..0000000 --- a/third_party/glm/ext/matrix_double2x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x3_precision.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, lowp> lowp_dmat2x3; - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, mediump> mediump_dmat2x3; - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, highp> highp_dmat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x4.hpp b/third_party/glm/ext/matrix_double2x4.hpp deleted file mode 100755 index 499284b..0000000 --- a/third_party/glm/ext/matrix_double2x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x4.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 4 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 4, double, defaultp> dmat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x4_precision.hpp b/third_party/glm/ext/matrix_double2x4_precision.hpp deleted file mode 100755 index 9b61ebc..0000000 --- a/third_party/glm/ext/matrix_double2x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x4_precision.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, lowp> lowp_dmat2x4; - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, mediump> mediump_dmat2x4; - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, highp> highp_dmat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x2.hpp b/third_party/glm/ext/matrix_double3x2.hpp deleted file mode 100755 index dd23f36..0000000 --- a/third_party/glm/ext/matrix_double3x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x2.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 2, double, defaultp> dmat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x2_precision.hpp b/third_party/glm/ext/matrix_double3x2_precision.hpp deleted file mode 100755 index 068d9e9..0000000 --- a/third_party/glm/ext/matrix_double3x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x2_precision.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, lowp> lowp_dmat3x2; - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, mediump> mediump_dmat3x2; - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, highp> highp_dmat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x3.hpp b/third_party/glm/ext/matrix_double3x3.hpp deleted file mode 100755 index 53572b7..0000000 --- a/third_party/glm/ext/matrix_double3x3.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x3.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 3 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, double, defaultp> dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, double, defaultp> dmat3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x3_precision.hpp b/third_party/glm/ext/matrix_double3x3_precision.hpp deleted file mode 100755 index 8691e78..0000000 --- a/third_party/glm/ext/matrix_double3x3_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x3_precision.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, lowp> lowp_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, mediump> mediump_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, highp> highp_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, lowp> lowp_dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, mediump> mediump_dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, highp> highp_dmat3x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x4.hpp b/third_party/glm/ext/matrix_double3x4.hpp deleted file mode 100755 index c572d63..0000000 --- a/third_party/glm/ext/matrix_double3x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x4.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 4 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 4, double, defaultp> dmat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x4_precision.hpp b/third_party/glm/ext/matrix_double3x4_precision.hpp deleted file mode 100755 index f040217..0000000 --- a/third_party/glm/ext/matrix_double3x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x4_precision.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, lowp> lowp_dmat3x4; - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, mediump> mediump_dmat3x4; - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, highp> highp_dmat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x2.hpp b/third_party/glm/ext/matrix_double4x2.hpp deleted file mode 100755 index 9b229f4..0000000 --- a/third_party/glm/ext/matrix_double4x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x2.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 2, double, defaultp> dmat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x2_precision.hpp b/third_party/glm/ext/matrix_double4x2_precision.hpp deleted file mode 100755 index 6ad18ba..0000000 --- a/third_party/glm/ext/matrix_double4x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x2_precision.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, lowp> lowp_dmat4x2; - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, mediump> mediump_dmat4x2; - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, highp> highp_dmat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x3.hpp b/third_party/glm/ext/matrix_double4x3.hpp deleted file mode 100755 index dca4cf9..0000000 --- a/third_party/glm/ext/matrix_double4x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x3.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 3 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 3, double, defaultp> dmat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x3_precision.hpp b/third_party/glm/ext/matrix_double4x3_precision.hpp deleted file mode 100755 index f7371de..0000000 --- a/third_party/glm/ext/matrix_double4x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x3_precision.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, lowp> lowp_dmat4x3; - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, mediump> mediump_dmat4x3; - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, highp> highp_dmat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x4.hpp b/third_party/glm/ext/matrix_double4x4.hpp deleted file mode 100755 index 81e1bf6..0000000 --- a/third_party/glm/ext/matrix_double4x4.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x4.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 4 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, double, defaultp> dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, double, defaultp> dmat4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x4_precision.hpp b/third_party/glm/ext/matrix_double4x4_precision.hpp deleted file mode 100755 index 4c36a84..0000000 --- a/third_party/glm/ext/matrix_double4x4_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x4_precision.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, lowp> lowp_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, mediump> mediump_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, highp> highp_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, lowp> lowp_dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, mediump> mediump_dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, highp> highp_dmat4x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x2.hpp b/third_party/glm/ext/matrix_float2x2.hpp deleted file mode 100755 index 53df921..0000000 --- a/third_party/glm/ext/matrix_float2x2.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, float, defaultp> mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, float, defaultp> mat2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x2_precision.hpp b/third_party/glm/ext/matrix_float2x2_precision.hpp deleted file mode 100755 index 898b6db..0000000 --- a/third_party/glm/ext/matrix_float2x2_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, lowp> lowp_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, mediump> mediump_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, highp> highp_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, lowp> lowp_mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, mediump> mediump_mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, highp> highp_mat2x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x3.hpp b/third_party/glm/ext/matrix_float2x3.hpp deleted file mode 100755 index 6f68822..0000000 --- a/third_party/glm/ext/matrix_float2x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x3.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 3 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 3, float, defaultp> mat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x3_precision.hpp b/third_party/glm/ext/matrix_float2x3_precision.hpp deleted file mode 100755 index 50c1032..0000000 --- a/third_party/glm/ext/matrix_float2x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x3_precision.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, lowp> lowp_mat2x3; - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, mediump> mediump_mat2x3; - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, highp> highp_mat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x4.hpp b/third_party/glm/ext/matrix_float2x4.hpp deleted file mode 100755 index 30f30de..0000000 --- a/third_party/glm/ext/matrix_float2x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x4.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 4 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 4, float, defaultp> mat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x4_precision.hpp b/third_party/glm/ext/matrix_float2x4_precision.hpp deleted file mode 100755 index 079d638..0000000 --- a/third_party/glm/ext/matrix_float2x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x4_precision.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, lowp> lowp_mat2x4; - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, mediump> mediump_mat2x4; - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, highp> highp_mat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x2.hpp b/third_party/glm/ext/matrix_float3x2.hpp deleted file mode 100755 index d39dd2f..0000000 --- a/third_party/glm/ext/matrix_float3x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x2.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core - /// @{ - - /// 3 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 2, float, defaultp> mat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x2_precision.hpp b/third_party/glm/ext/matrix_float3x2_precision.hpp deleted file mode 100755 index 8572c2a..0000000 --- a/third_party/glm/ext/matrix_float3x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x2_precision.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, lowp> lowp_mat3x2; - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, mediump> mediump_mat3x2; - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, highp> highp_mat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x3.hpp b/third_party/glm/ext/matrix_float3x3.hpp deleted file mode 100755 index 177d809..0000000 --- a/third_party/glm/ext/matrix_float3x3.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x3.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 3 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, float, defaultp> mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, float, defaultp> mat3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x3_precision.hpp b/third_party/glm/ext/matrix_float3x3_precision.hpp deleted file mode 100755 index 8a900c1..0000000 --- a/third_party/glm/ext/matrix_float3x3_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x3_precision.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, lowp> lowp_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, mediump> mediump_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, highp> highp_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, lowp> lowp_mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, mediump> mediump_mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, highp> highp_mat3x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x4.hpp b/third_party/glm/ext/matrix_float3x4.hpp deleted file mode 100755 index 64b8459..0000000 --- a/third_party/glm/ext/matrix_float3x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x4.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 4 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 4, float, defaultp> mat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x4_precision.hpp b/third_party/glm/ext/matrix_float3x4_precision.hpp deleted file mode 100755 index bc36bf1..0000000 --- a/third_party/glm/ext/matrix_float3x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x4_precision.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, lowp> lowp_mat3x4; - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, mediump> mediump_mat3x4; - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, highp> highp_mat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x2.hpp b/third_party/glm/ext/matrix_float4x2.hpp deleted file mode 100755 index 1ed5227..0000000 --- a/third_party/glm/ext/matrix_float4x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x2.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 2, float, defaultp> mat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x2_precision.hpp b/third_party/glm/ext/matrix_float4x2_precision.hpp deleted file mode 100755 index 88fd069..0000000 --- a/third_party/glm/ext/matrix_float4x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, lowp> lowp_mat4x2; - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, mediump> mediump_mat4x2; - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, highp> highp_mat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x3.hpp b/third_party/glm/ext/matrix_float4x3.hpp deleted file mode 100755 index 5dbe765..0000000 --- a/third_party/glm/ext/matrix_float4x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x3.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 3 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 3, float, defaultp> mat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x3_precision.hpp b/third_party/glm/ext/matrix_float4x3_precision.hpp deleted file mode 100755 index 846ed4f..0000000 --- a/third_party/glm/ext/matrix_float4x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x3_precision.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, lowp> lowp_mat4x3; - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, mediump> mediump_mat4x3; - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, highp> highp_mat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x4.hpp b/third_party/glm/ext/matrix_float4x4.hpp deleted file mode 100755 index 5ba111d..0000000 --- a/third_party/glm/ext/matrix_float4x4.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x4.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @ingroup core_matrix - /// @{ - - /// 4 columns of 4 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, float, defaultp> mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, float, defaultp> mat4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x4_precision.hpp b/third_party/glm/ext/matrix_float4x4_precision.hpp deleted file mode 100755 index 597149b..0000000 --- a/third_party/glm/ext/matrix_float4x4_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x4_precision.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, lowp> lowp_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, mediump> mediump_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, highp> highp_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, lowp> lowp_mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, mediump> mediump_mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, highp> highp_mat4x4; - - /// @} -}//namespace glm
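Reviewer note: every *_precision.hpp header deleted in this range follows the same scheme, one typedef per GLSL precision qualifier, with the unqualified names (glm::mat4, glm::dmat4, and so on) using defaultp. A quick sketch, assuming the default GLM configuration in which all three qualifiers map to the same storage on desktop targets:

    #include <glm/glm.hpp>

    glm::lowp_mat4 l(1.0f);   // explicitly low precision
    glm::highp_mat4 h(1.0f);  // explicitly high precision
    glm::mat4 m(1.0f);        // defaultp, which normally aliases highp

    // Under the default configuration these share a representation.
    static_assert(sizeof(m) == sizeof(h), "defaultp matches highp here");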
diff --git a/third_party/glm/ext/matrix_projection.hpp b/third_party/glm/ext/matrix_projection.hpp deleted file mode 100755 index 51fd01b..0000000 --- a/third_party/glm/ext/matrix_projection.hpp +++ /dev/null @@ -1,149 +0,0 @@ -/// @ref ext_matrix_projection -/// @file glm/ext/matrix_projection.hpp -/// -/// @defgroup ext_matrix_projection GLM_EXT_matrix_projection -/// @ingroup ext -/// -/// Functions that generate common projection transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility -/// specifications defines the particular layout of this eye space. -/// -/// Include <glm/ext/matrix_projection.hpp> to use the features of this extension. -/// -/// @see ext_matrix_transform -/// @see ext_matrix_clip_space - -#pragma once - -// Dependencies -#include "../gtc/constants.hpp" -#include "../geometric.hpp" -#include "../trigonometric.hpp" -#include "../matrix.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_projection extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_projection - /// @{ - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> projectZO( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> projectNO( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates using default near and far clip planes definition. - /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> project( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
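Reviewer note: the projectZO/projectNO/project trio declared above maps object-space points to window coordinates in the spirit of gluProject; the ZO and NO suffixes select the [0, 1] and [-1, 1] clip-depth conventions. A hedged usage sketch (camera parameters and viewport size are illustrative):

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_projection.hpp>
    #include <glm/ext/matrix_clip_space.hpp> // glm::perspective
    #include <glm/ext/matrix_transform.hpp>  // glm::lookAt

    glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),  // eye
                                 glm::vec3(0.0f),              // target
                                 glm::vec3(0.0f, 1.0f, 0.0f)); // up
    glm::mat4 proj = glm::perspective(glm::radians(60.0f), 16.0f / 9.0f, 0.1f, 100.0f);
    glm::vec4 viewport(0.0f, 0.0f, 1280.0f, 720.0f); // x, y, width, height

    // World-space origin -> pixel coordinates plus a depth in [0, 1].
    glm::vec3 win = glm::project(glm::vec3(0.0f), view, proj, viewport);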
- - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param win Specify the window coordinates to be mapped. - /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> unProjectZO( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param win Specify the window coordinates to be mapped. - /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> unProjectNO( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates using default near and far clip planes definition. - /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param win Specify the window coordinates to be mapped. - /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template<typename T, typename U, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> unProject( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport);
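Reviewer note: unProject inverts the mapping above, which is the usual building block for mouse picking. A sketch reusing view, proj and viewport from the previous snippet (the cursor position is a placeholder):

    // Window y grows upward while mouse y usually grows downward, hence the flip.
    glm::vec3 cursor(640.0f, 720.0f - 360.0f, 0.0f);          // on the near plane
    glm::vec3 nearPoint = glm::unProject(cursor, view, proj, viewport);
    cursor.z = 1.0f;                                          // on the far plane
    glm::vec3 farPoint = glm::unProject(cursor, view, proj, viewport);
    glm::vec3 rayDir = glm::normalize(farPoint - nearPoint);  // picking ray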
- - /// Define a picking region - /// - /// @param center Specify the center of a picking region in window coordinates. - /// @param delta Specify the width and height, respectively, of the picking region in window coordinates. - /// @param viewport Rendering viewport - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluPickMatrix man page - template<typename T, qualifier Q, typename U> - GLM_FUNC_DECL mat<4, 4, T, Q> pickMatrix( - vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport); - - /// @} -}//namespace glm - -#include "matrix_projection.inl" diff --git a/third_party/glm/ext/matrix_projection.inl b/third_party/glm/ext/matrix_projection.inl deleted file mode 100755 index 8b4eea9..0000000 --- a/third_party/glm/ext/matrix_projection.inl +++ /dev/null @@ -1,104 +0,0 @@ -namespace glm -{ - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> projectZO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1)); - tmp = model * tmp; - tmp = proj * tmp; - - tmp /= tmp.w; - tmp.x = tmp.x * static_cast<T>(0.5) + static_cast<T>(0.5); - tmp.y = tmp.y * static_cast<T>(0.5) + static_cast<T>(0.5); - - tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); - tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); - - return vec<3, T, Q>(tmp); - } - - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> projectNO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1)); - tmp = model * tmp; - tmp = proj * tmp; - - tmp /= tmp.w; - tmp = tmp * static_cast<T>(0.5) + static_cast<T>(0.5); - tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); - tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); - - return vec<3, T, Q>(tmp); - } - - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> project(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - if(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT) - return projectZO(obj, model, proj, viewport); - else - return projectNO(obj, model, proj, viewport); - } - - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - mat<4, 4, T, Q> Inverse = inverse(proj * model); - - vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); - tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); - tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); - tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1); - tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1); - - vec<4, T, Q> obj = Inverse * tmp; - obj /= obj.w; - - return vec<3, T, Q>(obj); - } - - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - mat<4, 4, T, Q> Inverse = inverse(proj * model); - - vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); - tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); - tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); - tmp = tmp * static_cast<T>(2) - static_cast<T>(1); - - vec<4, T, Q> obj = Inverse * tmp; - obj /= obj.w; - - return vec<3, T, Q>(obj); - } - - template<typename T, typename U, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> unProject(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) - { - if(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT) - return unProjectZO(win, model, proj, viewport); - else - return unProjectNO(win, model, proj, viewport); - } - - template<typename T, qualifier Q, typename U> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> pickMatrix(vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport) - { - assert(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0)); - mat<4, 4, T, Q> Result(static_cast<T>(1)); - - if(!(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0))) - return Result; // Error - - vec<3, T, Q> Temp( - (static_cast<T>(viewport[2]) - static_cast<T>(2) * (center.x - static_cast<T>(viewport[0]))) / delta.x, - (static_cast<T>(viewport[3]) - static_cast<T>(2) * (center.y - static_cast<T>(viewport[1]))) / delta.y, - static_cast<T>(0)); - - // Translate and scale the picked region to the entire window - Result = translate(Result, Temp); - return scale(Result, vec<3, T, Q>(static_cast<T>(viewport[2]) / delta.x, static_cast<T>(viewport[3]) / delta.y, static_cast<T>(1))); - } -}//namespace glm
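Reviewer note: as the implementations above show, project() and unProject() dispatch on GLM_CONFIG_CLIP_CONTROL at compile time. Since this runtime renders through Vulkan, whose clip volume uses a [0, 1] depth range, callers would typically pin that convention before the first GLM include; a sketch:

    // Define before any GLM header so that project()/unProject() resolve
    // to the *ZO variants (depth in [0, 1], as in Vulkan and Direct3D).
    #define GLM_FORCE_DEPTH_ZERO_TO_ONE
    #include <glm/glm.hpp>
    #include <glm/ext/matrix_projection.hpp>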
diff --git a/third_party/glm/ext/matrix_relational.hpp b/third_party/glm/ext/matrix_relational.hpp deleted file mode 100755 index 20023ad..0000000 --- a/third_party/glm/ext/matrix_relational.hpp +++ /dev/null @@ -1,132 +0,0 @@ -/// @ref ext_matrix_relational -/// @file glm/ext/matrix_relational.hpp -/// -/// @defgroup ext_matrix_relational GLM_EXT_matrix_relational -/// @ingroup ext -/// -/// Exposes comparison functions for matrix types that take a user defined epsilon values. -/// -/// Include <glm/ext/matrix_relational.hpp> to use the features of this extension. -/// -/// @see ext_vector_relational -/// @see ext_scalar_relational -/// @see ext_quaternion_relational - -#pragma once - -// Dependencies -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_relational extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_relational - /// @{ - - /// Perform a component-wise equal-to comparison of two matrices. - /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y); - - /// Perform a component-wise not-equal-to comparison of two matrices. - /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, T, Q> const& epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, T, Q> const& epsilon); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, vec<C, int, Q> const& ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, int ULPs);
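Reviewer note: these relational helpers return one flag per column, so callers normally fold the result with all() or any(). A minimal sketch (tolerances are illustrative):

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_relational.hpp>

    glm::mat4 a(1.0f);
    glm::mat4 b = a;
    b[3][0] += 1e-7f; // tiny perturbation of one component

    // equal() yields a glm::bvec4 here, one entry per column.
    bool nearlyEqual = glm::all(glm::equal(a, b, 1e-5f));
    bool different = glm::any(glm::notEqual(a, b, 1e-9f));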
- /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& ULPs); - - /// @} -}//namespace glm - -#include "matrix_relational.inl" diff --git a/third_party/glm/ext/matrix_relational.inl b/third_party/glm/ext/matrix_relational.inl deleted file mode 100755 index b2b8753..0000000 --- a/third_party/glm/ext/matrix_relational.inl +++ /dev/null @@ -1,82 +0,0 @@ -/// @ref ext_vector_relational -/// @file glm/ext/vector_relational.inl - -// Dependency: -#include "../ext/vector_relational.hpp" -#include "../common.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b) - { - return equal(a, b, static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, T Epsilon) - { - return equal(a, b, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& Epsilon) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = all(equal(a[i], b[i], Epsilon[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y) - { - return notEqual(x, y, static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, T Epsilon) - { - return notEqual(x, y, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& Epsilon) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = any(notEqual(a[i], b[i], Epsilon[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, int MaxULPs) - { - return equal(a, b, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& MaxULPs) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = all(equal(a[i], b[i], MaxULPs[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, int MaxULPs) - { - return notEqual(x, y, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& MaxULPs) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = any(notEqual(a[i], b[i], MaxULPs[i])); - return Result; - } - -}//namespace glm diff --git a/third_party/glm/ext/matrix_transform.hpp b/third_party/glm/ext/matrix_transform.hpp deleted file mode 100755 index cbd187e..0000000 --- a/third_party/glm/ext/matrix_transform.hpp +++ /dev/null @@ -1,144 +0,0 @@ -/// @ref ext_matrix_transform -/// @file glm/ext/matrix_transform.hpp -/// -/// @defgroup ext_matrix_transform GLM_EXT_matrix_transform -/// @ingroup ext -/// -/// Defines functions that generate common transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility -/// specifications defines the particular layout of this eye space. 
-/// -/// Include to use the features of this extension. -/// -/// @see ext_matrix_projection -/// @see ext_matrix_clip_space - -#pragma once - -// Dependencies -#include "../gtc/constants.hpp" -#include "../geometric.hpp" -#include "../trigonometric.hpp" -#include "../matrix.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_transform - /// @{ - - /// Builds an identity matrix. - template - GLM_FUNC_DECL GLM_CONSTEXPR genType identity(); - - /// Builds a translation 4 * 4 matrix created from a vector of 3 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a translation vector. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @code - /// #include - /// #include - /// ... - /// glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f)); - /// // m[0][0] == 1.0f, m[0][1] == 0.0f, m[0][2] == 0.0f, m[0][3] == 0.0f - /// // m[1][0] == 0.0f, m[1][1] == 1.0f, m[1][2] == 0.0f, m[1][3] == 0.0f - /// // m[2][0] == 0.0f, m[2][1] == 0.0f, m[2][2] == 1.0f, m[2][3] == 0.0f - /// // m[3][0] == 1.0f, m[3][1] == 1.0f, m[3][2] == 1.0f, m[3][3] == 1.0f - /// @endcode - /// - /// @see - translate(mat<4, 4, T, Q> const& m, T x, T y, T z) - /// @see - translate(vec<3, T, Q> const& v) - /// @see glTranslate man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> translate( - mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); - - /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle. - /// - /// @param m Input matrix multiplied by this rotation matrix. - /// @param angle Rotation angle expressed in radians. - /// @param axis Rotation axis, recommended to be normalized. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) - /// @see - rotate(T angle, vec<3, T, Q> const& v) - /// @see glRotate man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotate( - mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& axis); - - /// Builds a scale 4 * 4 matrix created from 3 scalars. - /// - /// @param m Input matrix multiplied by this scale matrix. - /// @param v Ratio of scaling for each axis. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - scale(mat<4, 4, T, Q> const& m, T x, T y, T z) - /// @see - scale(vec<3, T, Q> const& v) - /// @see glScale man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> scale( - mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); - - /// Build a right handed look at view matrix. - /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAtRH( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// Build a left handed look at view matrix. 
- /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAtLH( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// Build a look at view matrix based on the default handedness. - /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - /// @see gluLookAt man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAt( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// @} -}//namespace glm - -#include "matrix_transform.inl" diff --git a/third_party/glm/ext/matrix_transform.inl b/third_party/glm/ext/matrix_transform.inl deleted file mode 100755 index a415157..0000000 --- a/third_party/glm/ext/matrix_transform.inl +++ /dev/null @@ -1,152 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType identity() - { - return detail::init_gentype::GENTYPE>::identity(); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result(m); - Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - vec<3, T, Q> axis(normalize(v)); - vec<3, T, Q> temp((T(1) - c) * axis); - - mat<4, 4, T, Q> Rotate; - Rotate[0][0] = c + temp[0] * axis[0]; - Rotate[0][1] = temp[0] * axis[1] + s * axis[2]; - Rotate[0][2] = temp[0] * axis[2] - s * axis[1]; - - Rotate[1][0] = temp[1] * axis[0] - s * axis[2]; - Rotate[1][1] = c + temp[1] * axis[1]; - Rotate[1][2] = temp[1] * axis[2] + s * axis[0]; - - Rotate[2][0] = temp[2] * axis[0] + s * axis[1]; - Rotate[2][1] = temp[2] * axis[1] - s * axis[0]; - Rotate[2][2] = c + temp[2] * axis[2]; - - mat<4, 4, T, Q> Result; - Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate_slow(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - mat<4, 4, T, Q> Result; - - vec<3, T, Q> axis = normalize(v); - - Result[0][0] = c + (static_cast(1) - c) * axis.x * axis.x; - Result[0][1] = (static_cast(1) - c) * axis.x * axis.y + s * axis.z; - Result[0][2] = (static_cast(1) - c) * axis.x * axis.z - s * axis.y; - Result[0][3] = 
static_cast(0); - - Result[1][0] = (static_cast(1) - c) * axis.y * axis.x - s * axis.z; - Result[1][1] = c + (static_cast(1) - c) * axis.y * axis.y; - Result[1][2] = (static_cast(1) - c) * axis.y * axis.z + s * axis.x; - Result[1][3] = static_cast(0); - - Result[2][0] = (static_cast(1) - c) * axis.z * axis.x + s * axis.y; - Result[2][1] = (static_cast(1) - c) * axis.z * axis.y - s * axis.x; - Result[2][2] = c + (static_cast(1) - c) * axis.z * axis.z; - Result[2][3] = static_cast(0); - - Result[3] = vec<4, T, Q>(0, 0, 0, 1); - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result; - Result[0] = m[0] * v[0]; - Result[1] = m[1] * v[1]; - Result[2] = m[2] * v[2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale_slow(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result(T(1)); - Result[0][0] = v.x; - Result[1][1] = v.y; - Result[2][2] = v.z; - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtRH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - vec<3, T, Q> const f(normalize(center - eye)); - vec<3, T, Q> const s(normalize(cross(f, up))); - vec<3, T, Q> const u(cross(s, f)); - - mat<4, 4, T, Q> Result(1); - Result[0][0] = s.x; - Result[1][0] = s.y; - Result[2][0] = s.z; - Result[0][1] = u.x; - Result[1][1] = u.y; - Result[2][1] = u.z; - Result[0][2] =-f.x; - Result[1][2] =-f.y; - Result[2][2] =-f.z; - Result[3][0] =-dot(s, eye); - Result[3][1] =-dot(u, eye); - Result[3][2] = dot(f, eye); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtLH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - vec<3, T, Q> const f(normalize(center - eye)); - vec<3, T, Q> const s(normalize(cross(up, f))); - vec<3, T, Q> const u(cross(f, s)); - - mat<4, 4, T, Q> Result(1); - Result[0][0] = s.x; - Result[1][0] = s.y; - Result[2][0] = s.z; - Result[0][1] = u.x; - Result[1][1] = u.y; - Result[2][1] = u.z; - Result[0][2] = f.x; - Result[1][2] = f.y; - Result[2][2] = f.z; - Result[3][0] = -dot(s, eye); - Result[3][1] = -dot(u, eye); - Result[3][2] = -dot(f, eye); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAt(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - GLM_IF_CONSTEXPR(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT) - return lookAtLH(eye, center, up); - else - return lookAtRH(eye, center, up); - } -}//namespace glm diff --git a/third_party/glm/ext/quaternion_common.hpp b/third_party/glm/ext/quaternion_common.hpp deleted file mode 100755 index 2980ed4..0000000 --- a/third_party/glm/ext/quaternion_common.hpp +++ /dev/null @@ -1,120 +0,0 @@ -/// @ref ext_quaternion_common -/// @file glm/ext/quaternion_common.hpp -/// -/// @defgroup ext_quaternion_common GLM_EXT_quaternion_common -/// @ingroup ext -/// -/// Provides common functions for quaternion types -/// -/// Include to use the features of this extension. 
-/// -/// @see ext_scalar_common -/// @see ext_vector_common -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_trigonometric -/// @see ext_quaternion_transform - -#pragma once - -// Dependency: -#include "../ext/scalar_constants.hpp" -#include "../ext/quaternion_geometric.hpp" -#include "../common.hpp" -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_common - /// @{ - - /// Spherical linear interpolation of two quaternions. - /// The interpolation is oriented and the rotation is performed at constant speed. - /// For short path spherical linear interpolation, use the slerp function. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - slerp(qua const& x, qua const& y, T const& a) - template - GLM_FUNC_DECL qua mix(qua const& x, qua const& y, T a); - - /// Linear interpolation of two quaternions. - /// The interpolation is oriented. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined in the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua lerp(qua const& x, qua const& y, T a); - - /// Spherical linear interpolation of two quaternions. - /// The interpolation always take the short path and the rotation is performed at constant speed. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a); - - /// Returns the q conjugate. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua conjugate(qua const& q); - - /// Returns the q inverse. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua inverse(qua const& q); - - /// Returns true if x holds a NaN (not a number) - /// representation in the underlying implementation's set of - /// floating point representations. Returns false otherwise, - /// including for implementations with no NaN - /// representations. - /// - /// /!\ When using compiler fast math, this function may fail. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> isnan(qua const& x); - - /// Returns true if x holds a positive infinity or negative - /// infinity representation in the underlying implementation's - /// set of floating point representations. Returns false - /// otherwise, including for implementations with no infinity - /// representations. 
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> isinf(qua const& x); - - /// @} -} //namespace glm - -#include "quaternion_common.inl" diff --git a/third_party/glm/ext/quaternion_common.inl b/third_party/glm/ext/quaternion_common.inl deleted file mode 100755 index 3b2846f..0000000 --- a/third_party/glm/ext/quaternion_common.inl +++ /dev/null @@ -1,107 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua mix(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mix' only accept floating-point inputs"); - - T const cosTheta = dot(x, y); - - // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator - if(cosTheta > static_cast(1) - epsilon()) - { - // Linear interpolation - return qua( - mix(x.w, y.w, a), - mix(x.x, y.x, a), - mix(x.y, y.y, a), - mix(x.z, y.z, a)); - } - else - { - // Essential Mathematics, page 467 - T angle = acos(cosTheta); - return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * y) / sin(angle); - } - } - - template - GLM_FUNC_QUALIFIER qua lerp(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'lerp' only accept floating-point inputs"); - - // Lerp is only defined in [0, 1] - assert(a >= static_cast(0)); - assert(a <= static_cast(1)); - - return x * (static_cast(1) - a) + (y * a); - } - - template - GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'slerp' only accept floating-point inputs"); - - qua z = y; - - T cosTheta = dot(x, y); - - // If cosTheta < 0, the interpolation will take the long way around the sphere. - // To fix this, one quat must be negated. 
-		if(cosTheta < static_cast<T>(0))
-		{
-			z = -y;
-			cosTheta = -cosTheta;
-		}
-
-		// Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator
-		if(cosTheta > static_cast<T>(1) - epsilon<T>())
-		{
-			// Linear interpolation
-			return qua<T, Q>(
-				mix(x.w, z.w, a),
-				mix(x.x, z.x, a),
-				mix(x.y, z.y, a),
-				mix(x.z, z.z, a));
-		}
-		else
-		{
-			// Essential Mathematics, page 467
-			T angle = acos(cosTheta);
-			return (sin((static_cast<T>(1) - a) * angle) * x + sin(a * angle) * z) / sin(angle);
-		}
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q> conjugate(qua<T, Q> const& q)
-	{
-		return qua<T, Q>(q.w, -q.x, -q.y, -q.z);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q> inverse(qua<T, Q> const& q)
-	{
-		return conjugate(q) / dot(q, q);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<4, bool, Q> isnan(qua<T, Q> const& q)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isnan' only accept floating-point inputs");
-
-		return vec<4, bool, Q>(isnan(q.x), isnan(q.y), isnan(q.z), isnan(q.w));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<4, bool, Q> isinf(qua<T, Q> const& q)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isinf' only accept floating-point inputs");
-
-		return vec<4, bool, Q>(isinf(q.x), isinf(q.y), isinf(q.z), isinf(q.w));
-	}
-}//namespace glm
-
-#if GLM_CONFIG_SIMD == GLM_ENABLE
-#	include "quaternion_common_simd.inl"
-#endif
-
diff --git a/third_party/glm/ext/quaternion_common_simd.inl b/third_party/glm/ext/quaternion_common_simd.inl
deleted file mode 100755
index ddfc8a4..0000000
--- a/third_party/glm/ext/quaternion_common_simd.inl
+++ /dev/null
@@ -1,18 +0,0 @@
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-namespace glm{
-namespace detail
-{
-	template<qualifier Q>
-	struct compute_dot<qua<float, Q>, float, true>
-	{
-		static GLM_FUNC_QUALIFIER float call(qua<float, Q> const& x, qua<float, Q> const& y)
-		{
-			return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data));
-		}
-	};
-}//namespace detail
-}//namespace glm
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
-
diff --git a/third_party/glm/ext/quaternion_double.hpp b/third_party/glm/ext/quaternion_double.hpp
deleted file mode 100755
index 63b24de..0000000
--- a/third_party/glm/ext/quaternion_double.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/// @ref ext_quaternion_double
-/// @file glm/ext/quaternion_double.hpp
-///
-/// @defgroup ext_quaternion_double GLM_EXT_quaternion_double
-/// @ingroup ext
-///
-/// Exposes double-precision floating point quaternion type.
-///
-/// Include <glm/ext/quaternion_double.hpp> to use the features of this extension.
-///
-/// @see ext_quaternion_float
-/// @see ext_quaternion_double_precision
-/// @see ext_quaternion_common
-/// @see ext_quaternion_exponential
-/// @see ext_quaternion_geometric
-/// @see ext_quaternion_relational
-/// @see ext_quaternion_transform
-/// @see ext_quaternion_trigonometric
-
-#pragma once
-
-// Dependency:
-#include "../detail/type_quat.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_double extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_double
-	/// @{
-
-	/// Quaternion of double-precision floating-point numbers.
-	typedef qua<double, defaultp> dquat;
-
-	/// @}
-} //namespace glm
-
diff --git a/third_party/glm/ext/quaternion_double_precision.hpp b/third_party/glm/ext/quaternion_double_precision.hpp
deleted file mode 100755
index 8aa24a1..0000000
--- a/third_party/glm/ext/quaternion_double_precision.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/// @ref ext_quaternion_double_precision
-/// @file glm/ext/quaternion_double_precision.hpp
-///
-/// @defgroup ext_quaternion_double_precision GLM_EXT_quaternion_double_precision
-/// @ingroup ext
-///
-/// Exposes double-precision floating point quaternion type with various precision in term of ULPs.
-///
-/// Include <glm/ext/quaternion_double_precision.hpp> to use the features of this extension.
-
-#pragma once
-
-// Dependency:
-#include "../detail/type_quat.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_double_precision extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_double_precision
-	/// @{
-
-	/// Quaternion of double-precision floating-point numbers using high precision arithmetic in term of ULPs.
-	///
-	/// @see ext_quaternion_double_precision
-	typedef qua<double, lowp> lowp_dquat;
-
-	/// Quaternion of medium double-qualifier floating-point numbers using high precision arithmetic in term of ULPs.
-	///
-	/// @see ext_quaternion_double_precision
-	typedef qua<double, mediump> mediump_dquat;
-
-	/// Quaternion of high double-qualifier floating-point numbers using high precision arithmetic in term of ULPs.
-	///
-	/// @see ext_quaternion_double_precision
-	typedef qua<double, highp> highp_dquat;
-
-	/// @}
-} //namespace glm
-
diff --git a/third_party/glm/ext/quaternion_exponential.hpp b/third_party/glm/ext/quaternion_exponential.hpp
deleted file mode 100755
index affe297..0000000
--- a/third_party/glm/ext/quaternion_exponential.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/// @ref ext_quaternion_exponential
-/// @file glm/ext/quaternion_exponential.hpp
-///
-/// @defgroup ext_quaternion_exponential GLM_EXT_quaternion_exponential
-/// @ingroup ext
-///
-/// Provides exponential functions for quaternion types
-///
-/// Include <glm/ext/quaternion_exponential.hpp> to use the features of this extension.
-///
-/// @see core_exponential
-/// @see ext_quaternion_float
-/// @see ext_quaternion_double
-
-#pragma once
-
-// Dependency:
-#include "../common.hpp"
-#include "../trigonometric.hpp"
-#include "../geometric.hpp"
-#include "../ext/scalar_constants.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_exponential extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_transform
-	/// @{
-
-	/// Returns a exponential of a quaternion.
-	///
-	/// @tparam T A floating-point scalar type
-	/// @tparam Q A value from qualifier enum
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL qua<T, Q> exp(qua<T, Q> const& q);
-
-	/// Returns a logarithm of a quaternion
-	///
-	/// @tparam T A floating-point scalar type
-	/// @tparam Q A value from qualifier enum
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL qua<T, Q> log(qua<T, Q> const& q);
-
-	/// Returns a quaternion raised to a power.
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua pow(qua const& q, T y); - - /// Returns the square root of a quaternion - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua sqrt(qua const& q); - - /// @} -} //namespace glm - -#include "quaternion_exponential.inl" diff --git a/third_party/glm/ext/quaternion_exponential.inl b/third_party/glm/ext/quaternion_exponential.inl deleted file mode 100755 index 8456c00..0000000 --- a/third_party/glm/ext/quaternion_exponential.inl +++ /dev/null @@ -1,85 +0,0 @@ -#include "scalar_constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua exp(qua const& q) - { - vec<3, T, Q> u(q.x, q.y, q.z); - T const Angle = glm::length(u); - if (Angle < epsilon()) - return qua(); - - vec<3, T, Q> const v(u / Angle); - return qua(cos(Angle), sin(Angle) * v); - } - - template - GLM_FUNC_QUALIFIER qua log(qua const& q) - { - vec<3, T, Q> u(q.x, q.y, q.z); - T Vec3Len = length(u); - - if (Vec3Len < epsilon()) - { - if(q.w > static_cast(0)) - return qua(log(q.w), static_cast(0), static_cast(0), static_cast(0)); - else if(q.w < static_cast(0)) - return qua(log(-q.w), pi(), static_cast(0), static_cast(0)); - else - return qua(std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity()); - } - else - { - T t = atan(Vec3Len, T(q.w)) / Vec3Len; - T QuatLen2 = Vec3Len * Vec3Len + q.w * q.w; - return qua(static_cast(0.5) * log(QuatLen2), t * q.x, t * q.y, t * q.z); - } - } - - template - GLM_FUNC_QUALIFIER qua pow(qua const& x, T y) - { - //Raising to the power of 0 should yield 1 - //Needed to prevent a division by 0 error later on - if(y > -epsilon() && y < epsilon()) - return qua(1,0,0,0); - - //To deal with non-unit quaternions - T magnitude = sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w *x.w); - - T Angle; - if(abs(x.w / magnitude) > cos_one_over_two()) - { - //Scalar component is close to 1; using it to recover angle would lose precision - //Instead, we use the non-scalar components since sin() is accurate around 0 - - //Prevent a division by 0 error later on - T VectorMagnitude = x.x * x.x + x.y * x.y + x.z * x.z; - if (glm::abs(VectorMagnitude - static_cast(0)) < glm::epsilon()) { - //Equivalent to raising a real number to a power - return qua(pow(x.w, y), 0, 0, 0); - } - - Angle = asin(sqrt(VectorMagnitude) / magnitude); - } - else - { - //Scalar component is small, shouldn't cause loss of precision - Angle = acos(x.w / magnitude); - } - - T NewAngle = Angle * y; - T Div = sin(NewAngle) / sin(Angle); - T Mag = pow(magnitude, y - static_cast(1)); - return qua(cos(NewAngle) * magnitude * Mag, x.x * Div * Mag, x.y * Div * Mag, x.z * Div * Mag); - } - - template - GLM_FUNC_QUALIFIER qua sqrt(qua const& x) - { - return pow(x, static_cast(0.5)); - } -}//namespace glm - - diff --git a/third_party/glm/ext/quaternion_float.hpp b/third_party/glm/ext/quaternion_float.hpp deleted file mode 100755 index ca42a60..0000000 --- a/third_party/glm/ext/quaternion_float.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/// @ref ext_quaternion_float -/// @file glm/ext/quaternion_float.hpp -/// -/// @defgroup ext_quaternion_float GLM_EXT_quaternion_float -/// @ingroup ext -/// -/// Exposes single-precision floating point quaternion type. -/// -/// Include to use the features of this extension. 
-///
-/// @see ext_quaternion_double
-/// @see ext_quaternion_float_precision
-/// @see ext_quaternion_common
-/// @see ext_quaternion_exponential
-/// @see ext_quaternion_geometric
-/// @see ext_quaternion_relational
-/// @see ext_quaternion_transform
-/// @see ext_quaternion_trigonometric
-
-#pragma once
-
-// Dependency:
-#include "../detail/type_quat.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_float extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_float
-	/// @{
-
-	/// Quaternion of single-precision floating-point numbers.
-	typedef qua<float, defaultp> quat;
-
-	/// @}
-} //namespace glm
-
diff --git a/third_party/glm/ext/quaternion_float_precision.hpp b/third_party/glm/ext/quaternion_float_precision.hpp
deleted file mode 100755
index f9e4f5c..0000000
--- a/third_party/glm/ext/quaternion_float_precision.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/// @ref ext_quaternion_float_precision
-/// @file glm/ext/quaternion_float_precision.hpp
-///
-/// @defgroup ext_quaternion_float_precision GLM_EXT_quaternion_float_precision
-/// @ingroup ext
-///
-/// Exposes single-precision floating point quaternion type with various precision in term of ULPs.
-///
-/// Include <glm/ext/quaternion_float_precision.hpp> to use the features of this extension.
-
-#pragma once
-
-// Dependency:
-#include "../detail/type_quat.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_float_precision extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_float_precision
-	/// @{
-
-	/// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
-	typedef qua<float, lowp> lowp_quat;
-
-	/// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
-	typedef qua<float, mediump> mediump_quat;
-
-	/// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
-	typedef qua<float, highp> highp_quat;
-
-	/// @}
-} //namespace glm
-
diff --git a/third_party/glm/ext/quaternion_geometric.hpp b/third_party/glm/ext/quaternion_geometric.hpp
deleted file mode 100755
index 6d98bbe..0000000
--- a/third_party/glm/ext/quaternion_geometric.hpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/// @ref ext_quaternion_geometric
-/// @file glm/ext/quaternion_geometric.hpp
-///
-/// @defgroup ext_quaternion_geometric GLM_EXT_quaternion_geometric
-/// @ingroup ext
-///
-/// Provides geometric functions for quaternion types
-///
-/// Include <glm/ext/quaternion_geometric.hpp> to use the features of this extension.
-///
-/// @see core_geometric
-/// @see ext_quaternion_float
-/// @see ext_quaternion_double
-
-#pragma once
-
-// Dependency:
-#include "../geometric.hpp"
-#include "../exponential.hpp"
-#include "../ext/vector_relational.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_geometric extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_geometric
-	/// @{
-
-	/// Returns the norm of a quaternions
-	///
-	/// @tparam T Floating-point scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_quaternion_geometric
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL T length(qua<T, Q> const& q);
-
-	/// Returns the normalized quaternion.
-	///
-	/// @tparam T Floating-point scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_quaternion_geometric
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL qua<T, Q> normalize(qua<T, Q> const& q);
-
-	/// Returns dot product of q1 and q2, i.e., q1[0] * q2[0] + q1[1] * q2[1] + ...
-	///
-	/// @tparam T Floating-point scalar types.
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_quaternion_geometric
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL T dot(qua<T, Q> const& x, qua<T, Q> const& y);
-
-	/// Compute a cross product.
-	///
-	/// @tparam T Floating-point scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_quaternion_geometric
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q> cross(qua<T, Q> const& q1, qua<T, Q> const& q2);
-
-	/// @}
-} //namespace glm
-
-#include "quaternion_geometric.inl"
diff --git a/third_party/glm/ext/quaternion_geometric.inl b/third_party/glm/ext/quaternion_geometric.inl
deleted file mode 100755
index e155ac5..0000000
--- a/third_party/glm/ext/quaternion_geometric.inl
+++ /dev/null
@@ -1,36 +0,0 @@
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER T dot(qua<T, Q> const& x, qua<T, Q> const& y)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'dot' accepts only floating-point inputs");
-		return detail::compute_dot<qua<T, Q>, T, detail::is_aligned<Q>::value>::call(x, y);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER T length(qua<T, Q> const& q)
-	{
-		return glm::sqrt(dot(q, q));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q> normalize(qua<T, Q> const& q)
-	{
-		T len = length(q);
-		if(len <= static_cast<T>(0)) // Problem
-			return qua<T, Q>(static_cast<T>(1), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0));
-		T oneOverLen = static_cast<T>(1) / len;
-		return qua<T, Q>(q.w * oneOverLen, q.x * oneOverLen, q.y * oneOverLen, q.z * oneOverLen);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q> cross(qua<T, Q> const& q1, qua<T, Q> const& q2)
-	{
-		return qua<T, Q>(
-			q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z,
-			q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y,
-			q1.w * q2.y + q1.y * q2.w + q1.z * q2.x - q1.x * q2.z,
-			q1.w * q2.z + q1.z * q2.w + q1.x * q2.y - q1.y * q2.x);
-	}
-}//namespace glm
-
diff --git a/third_party/glm/ext/quaternion_relational.hpp b/third_party/glm/ext/quaternion_relational.hpp
deleted file mode 100755
index 7aa121d..0000000
--- a/third_party/glm/ext/quaternion_relational.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/// @ref ext_quaternion_relational
-/// @file glm/ext/quaternion_relational.hpp
-///
-/// @defgroup ext_quaternion_relational GLM_EXT_quaternion_relational
-/// @ingroup ext
-///
-/// Exposes comparison functions for quaternion types that take a user defined epsilon values.
-///
-/// Include <glm/ext/quaternion_relational.hpp> to use the features of this extension.
-///
-/// @see core_vector_relational
-/// @see ext_vector_relational
-/// @see ext_matrix_relational
-/// @see ext_quaternion_float
-/// @see ext_quaternion_double
-
-#pragma once
-
-// Dependency:
-#include "../vector_relational.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_quaternion_relational extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_quaternion_relational
-	/// @{
-
-	/// Returns the component-wise comparison of result x == y.
-	///
-	/// @tparam T Floating-point scalar types
-	/// @tparam Q Value from qualifier enum
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y);
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	///
-	/// @tparam T Floating-point scalar types
-	/// @tparam Q Value from qualifier enum
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<4, bool, Q> equal(qua<T, Q> const& x, qua<T, Q> const& y, T epsilon);
-
-	/// Returns the component-wise comparison of result x != y.
- /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon); - - /// @} -} //namespace glm - -#include "quaternion_relational.inl" diff --git a/third_party/glm/ext/quaternion_relational.inl b/third_party/glm/ext/quaternion_relational.inl deleted file mode 100755 index b1713e9..0000000 --- a/third_party/glm/ext/quaternion_relational.inl +++ /dev/null @@ -1,35 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] == y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return lessThan(abs(v), vec<4, T, Q>(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] != y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); - } -}//namespace glm - diff --git a/third_party/glm/ext/quaternion_transform.hpp b/third_party/glm/ext/quaternion_transform.hpp deleted file mode 100755 index a9cc5c2..0000000 --- a/third_party/glm/ext/quaternion_transform.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/// @ref ext_quaternion_transform -/// @file glm/ext/quaternion_transform.hpp -/// -/// @defgroup ext_quaternion_transform GLM_EXT_quaternion_transform -/// @ingroup ext -/// -/// Provides transformation functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_trigonometric - -#pragma once - -// Dependency: -#include "../common.hpp" -#include "../trigonometric.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_transform - /// @{ - - /// Rotates a quaternion from a vector of 3 components axis and an angle. - /// - /// @param q Source orientation - /// @param angle Angle expressed in radians. 
- /// @param axis Axis of the rotation - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& axis); - /// @} -} //namespace glm - -#include "quaternion_transform.inl" diff --git a/third_party/glm/ext/quaternion_transform.inl b/third_party/glm/ext/quaternion_transform.inl deleted file mode 100755 index b87ecb6..0000000 --- a/third_party/glm/ext/quaternion_transform.inl +++ /dev/null @@ -1,24 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& v) - { - vec<3, T, Q> Tmp = v; - - // Axis of rotation must be normalised - T len = glm::length(Tmp); - if(abs(len - static_cast(1)) > static_cast(0.001)) - { - T oneOverLen = static_cast(1) / len; - Tmp.x *= oneOverLen; - Tmp.y *= oneOverLen; - Tmp.z *= oneOverLen; - } - - T const AngleRad(angle); - T const Sin = sin(AngleRad * static_cast(0.5)); - - return q * qua(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); - } -}//namespace glm - diff --git a/third_party/glm/ext/quaternion_trigonometric.hpp b/third_party/glm/ext/quaternion_trigonometric.hpp deleted file mode 100755 index 76cea27..0000000 --- a/third_party/glm/ext/quaternion_trigonometric.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/// @ref ext_quaternion_trigonometric -/// @file glm/ext/quaternion_trigonometric.hpp -/// -/// @defgroup ext_quaternion_trigonometric GLM_EXT_quaternion_trigonometric -/// @ingroup ext -/// -/// Provides trigonometric functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_transform - -#pragma once - -// Dependency: -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include "scalar_constants.hpp" -#include "vector_relational.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_trigonometric extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_trigonometric - /// @{ - - /// Returns the quaternion rotation angle. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL T angle(qua const& x); - - /// Returns the q rotation axis. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<3, T, Q> axis(qua const& x); - - /// Build a quaternion from an angle and a normalized axis. - /// - /// @param angle Angle expressed in radians. - /// @param axis Axis of the quaternion, must be normalized. 
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua angleAxis(T const& angle, vec<3, T, Q> const& axis); - - /// @} -} //namespace glm - -#include "quaternion_trigonometric.inl" diff --git a/third_party/glm/ext/quaternion_trigonometric.inl b/third_party/glm/ext/quaternion_trigonometric.inl deleted file mode 100755 index 06b7c4c..0000000 --- a/third_party/glm/ext/quaternion_trigonometric.inl +++ /dev/null @@ -1,34 +0,0 @@ -#include "scalar_constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T angle(qua const& x) - { - if (abs(x.w) > cos_one_over_two()) - { - return asin(sqrt(x.x * x.x + x.y * x.y + x.z * x.z)) * static_cast(2); - } - - return acos(x.w) * static_cast(2); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> axis(qua const& x) - { - T const tmp1 = static_cast(1) - x.w * x.w; - if(tmp1 <= static_cast(0)) - return vec<3, T, Q>(0, 0, 1); - T const tmp2 = static_cast(1) / sqrt(tmp1); - return vec<3, T, Q>(x.x * tmp2, x.y * tmp2, x.z * tmp2); - } - - template - GLM_FUNC_QUALIFIER qua angleAxis(T const& angle, vec<3, T, Q> const& v) - { - T const a(angle); - T const s = glm::sin(a * static_cast(0.5)); - - return qua(glm::cos(a * static_cast(0.5)), v * s); - } -}//namespace glm diff --git a/third_party/glm/ext/scalar_common.hpp b/third_party/glm/ext/scalar_common.hpp deleted file mode 100755 index 4ab0f88..0000000 --- a/third_party/glm/ext/scalar_common.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/// @ref ext_scalar_common -/// @file glm/ext/scalar_common.hpp -/// -/// @defgroup ext_scalar_common GLM_EXT_scalar_common -/// @ingroup ext -/// -/// Exposes min and max functions for 3 to 4 scalar parameters. -/// -/// Include to use the features of this extension. -/// -/// @see core_func_common -/// @see ext_vector_common - -#pragma once - -// Dependency: -#include "../common.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_scalar_common - /// @{ - - /// Returns the minimum component-wise values of 3 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T min(T a, T b, T c); - - /// Returns the minimum component-wise values of 4 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T min(T a, T b, T c, T d); - - /// Returns the maximum component-wise values of 3 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T max(T a, T b, T c); - - /// Returns the maximum component-wise values of 4 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T max(T a, T b, T c, T d); - - /// Returns the minimum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b); - - /// Returns the minimum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b, T c); - - /// Returns the minimum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. 
- /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b, T c, T d); - - /// Returns the maximum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b); - - /// Returns the maximum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b, T C); - - /// Returns the maximum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b, T C, T D); - - /// @} -}//namespace glm - -#include "scalar_common.inl" diff --git a/third_party/glm/ext/scalar_common.inl b/third_party/glm/ext/scalar_common.inl deleted file mode 100755 index 118a670..0000000 --- a/third_party/glm/ext/scalar_common.inl +++ /dev/null @@ -1,115 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER T min(T a, T b, T c) - { - return glm::min(glm::min(a, b), c); - } - - template - GLM_FUNC_QUALIFIER T min(T a, T b, T c, T d) - { - return glm::min(glm::min(a, b), glm::min(c, d)); - } - - template - GLM_FUNC_QUALIFIER T max(T a, T b, T c) - { - return glm::max(glm::max(a, b), c); - } - - template - GLM_FUNC_QUALIFIER T max(T a, T b, T c, T d) - { - return glm::max(glm::max(a, b), glm::max(c, d)); - } - -# if GLM_HAS_CXX11_STL - using std::fmin; -# else - template - GLM_FUNC_QUALIFIER T fmin(T a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return b; - return min(a, b); - } -# endif - - template - GLM_FUNC_QUALIFIER T fmin(T a, T b, T c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return fmin(b, c); - if (isnan(b)) - return fmin(a, c); - if (isnan(c)) - return min(a, b); - return min(a, b, c); - } - - template - GLM_FUNC_QUALIFIER T fmin(T a, T b, T c, T d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return fmin(b, c, d); - if (isnan(b)) - return min(a, fmin(c, d)); - if (isnan(c)) - return fmin(min(a, b), d); - if (isnan(d)) - return min(a, b, c); - return min(a, b, c, d); - } - - -# if GLM_HAS_CXX11_STL - using std::fmax; -# else - template - GLM_FUNC_QUALIFIER T fmax(T a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return b; - return max(a, b); - } -# endif - - template - GLM_FUNC_QUALIFIER T fmax(T a, T b, T c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return fmax(b, c); - if (isnan(b)) - return fmax(a, c); - if (isnan(c)) - return max(a, b); - return max(a, b, c); - } - - template - GLM_FUNC_QUALIFIER T fmax(T a, T b, T c, T d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return fmax(b, c, d); - if (isnan(b)) - return max(a, fmax(c, d)); - if (isnan(c)) - return fmax(max(a, b), d); - if (isnan(d)) - return max(a, b, c); - return max(a, b, c, d); - } -}//namespace glm 
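The fmin/fmax overloads deleted above differ from the plain min/max overloads in the same file only in their NaN handling: each f-variant drops NaN operands and falls back on the remaining arguments, mirroring std::fmin/std::fmax. A minimal sketch of that difference, assuming upstream GLM is still available on the include path (the snippet and its printed values are illustrative, not part of this patch):

// Contrasts the NaN-skipping fmin/fmax overloads with plain min/max.
#include <cstdio>
#include <limits>
#include <glm/ext/scalar_common.hpp>

int main()
{
	float const NaN = std::numeric_limits<float>::quiet_NaN();

	// fmin skips the NaN operand, so the smallest finite input survives: prints 2.0.
	std::printf("fmin(NaN, 2, 3) = %f\n", glm::fmin(NaN, 2.0f, 3.0f));

	// min compares with operator<, so the leading NaN propagates: prints nan.
	std::printf("min(NaN, 2, 3)  = %f\n", glm::min(NaN, 2.0f, 3.0f));

	// The four-argument fmax likewise ignores every NaN operand: prints 4.0.
	std::printf("fmax(NaN, 1, NaN, 4) = %f\n", glm::fmax(NaN, 1.0f, NaN, 4.0f));
	return 0;
}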
diff --git a/third_party/glm/ext/scalar_constants.hpp b/third_party/glm/ext/scalar_constants.hpp
deleted file mode 100755
index 74e210d..0000000
--- a/third_party/glm/ext/scalar_constants.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/// @ref ext_scalar_constants
-/// @file glm/ext/scalar_constants.hpp
-///
-/// @defgroup ext_scalar_constants GLM_EXT_scalar_constants
-/// @ingroup ext
-///
-/// Provides a list of constants and precomputed useful values.
-///
-/// Include <glm/ext/scalar_constants.hpp> to use the features of this extension.
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_scalar_constants extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_scalar_constants
-	/// @{
-
-	/// Return the epsilon constant for floating point types.
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon();
-
-	/// Return the pi constant for floating point types.
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR genType pi();
-
-	/// Return the value of cos(1 / 2) for floating point types.
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR genType cos_one_over_two();
-
-	/// @}
-} //namespace glm
-
-#include "scalar_constants.inl"
diff --git a/third_party/glm/ext/scalar_constants.inl b/third_party/glm/ext/scalar_constants.inl
deleted file mode 100755
index b475adf..0000000
--- a/third_party/glm/ext/scalar_constants.inl
+++ /dev/null
@@ -1,24 +0,0 @@
-#include <limits>
-
-namespace glm
-{
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType epsilon()
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'epsilon' only accepts floating-point inputs");
-		return std::numeric_limits<genType>::epsilon();
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType pi()
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'pi' only accepts floating-point inputs");
-		return static_cast<genType>(3.14159265358979323846264338327950288);
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType cos_one_over_two()
-	{
-		return genType(0.877582561890372716130286068203503191);
-	}
-} //namespace glm
diff --git a/third_party/glm/ext/scalar_int_sized.hpp b/third_party/glm/ext/scalar_int_sized.hpp
deleted file mode 100755
index 8e9c511..0000000
--- a/third_party/glm/ext/scalar_int_sized.hpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/// @ref ext_scalar_int_sized
-/// @file glm/ext/scalar_int_sized.hpp
-///
-/// @defgroup ext_scalar_int_sized GLM_EXT_scalar_int_sized
-/// @ingroup ext
-///
-/// Exposes sized signed integer scalar types.
-///
-/// Include <glm/ext/scalar_int_sized.hpp> to use the features of this extension.
-///
-/// @see ext_scalar_uint_sized
-
-#pragma once
-
-#include "../detail/setup.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_scalar_int_sized extension included")
-#endif
-
-namespace glm{
-namespace detail
-{
-#	if GLM_HAS_EXTENDED_INTEGER_TYPE
-	typedef std::int8_t int8;
-	typedef std::int16_t int16;
-	typedef std::int32_t int32;
-#	else
-	typedef signed char int8;
-	typedef signed short int16;
-	typedef signed int int32;
-#endif//
-
-	template<>
-	struct is_int<int8>
-	{
-		enum test {value = ~0};
-	};
-
-	template<>
-	struct is_int<int16>
-	{
-		enum test {value = ~0};
-	};
-
-	template<>
-	struct is_int<int64>
-	{
-		enum test {value = ~0};
-	};
-}//namespace detail
-
-
-	/// @addtogroup ext_scalar_int_sized
-	/// @{
-
-	/// 8 bit signed integer type.
-	typedef detail::int8 int8;
-
-	/// 16 bit signed integer type.
-	typedef detail::int16 int16;
-
-	/// 32 bit signed integer type.
- typedef detail::int32 int32; - - /// 64 bit signed integer type. - typedef detail::int64 int64; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/scalar_integer.hpp b/third_party/glm/ext/scalar_integer.hpp deleted file mode 100755 index a2ca8a2..0000000 --- a/third_party/glm/ext/scalar_integer.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref ext_scalar_integer -/// @file glm/ext/scalar_integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup ext_scalar_integer GLM_EXT_scalar_integer -/// @ingroup ext -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../detail/type_float.hpp" -#include "../vector_relational.hpp" -#include "../common.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_scalar_integer - /// @{ - - /// Return true if the value is a power of two number. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL bool isPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType nextPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType prevPowerOfTwo(genIUType v); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL bool isMultiple(genIUType v, genIUType Multiple); - - /// Higher multiple number of Source. - /// - /// @tparam genIUType Integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType nextMultiple(genIUType v, genIUType Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam genIUType Integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType prevMultiple(genIUType v, genIUType Multiple); - - /// Returns the bit number of the Nth significant bit set to - /// 1 in the binary representation of value. - /// If value bitcount is less than the Nth significant bit, -1 will be returned. - /// - /// @tparam genIUType Signed or unsigned integer scalar types. 
- /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL int findNSB(genIUType x, int significantBitCount); - - /// @} -} //namespace glm - -#include "scalar_integer.inl" diff --git a/third_party/glm/ext/scalar_integer.inl b/third_party/glm/ext/scalar_integer.inl deleted file mode 100755 index efba960..0000000 --- a/third_party/glm/ext/scalar_integer.inl +++ /dev/null @@ -1,243 +0,0 @@ -#include "../integer.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_ceilShift - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T) - { - return v; - } - }; - - template - struct compute_ceilShift - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Shift) - { - return v | (v >> Shift); - } - }; - - template - struct compute_ceilPowerOfTwo - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); - - vec const Sign(sign(x)); - - vec v(abs(x)); - - v = v - static_cast(1); - v = v | (v >> static_cast(1)); - v = v | (v >> static_cast(2)); - v = v | (v >> static_cast(4)); - v = compute_ceilShift= 2>::call(v, 8); - v = compute_ceilShift= 4>::call(v, 16); - v = compute_ceilShift= 8>::call(v, 32); - return (v + static_cast(1)) * Sign; - } - }; - - template - struct compute_ceilPowerOfTwo - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); - - vec v(x); - - v = v - static_cast(1); - v = v | (v >> static_cast(1)); - v = v | (v >> static_cast(2)); - v = v | (v >> static_cast(4)); - v = compute_ceilShift= 2>::call(v, 8); - v = compute_ceilShift= 4>::call(v, 16); - v = compute_ceilShift= 8>::call(v, 32); - return v + static_cast(1); - } - }; - - template - struct compute_ceilMultiple{}; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source > genType(0)) - return Source + (Multiple - std::fmod(Source, Multiple)); - else - return Source + std::fmod(-Source, Multiple); - } - }; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - genType Tmp = Source - genType(1); - return Tmp + (Multiple - (Tmp % Multiple)); - } - }; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - assert(Multiple > genType(0)); - if(Source > genType(0)) - { - genType Tmp = Source - genType(1); - return Tmp + (Multiple - (Tmp % Multiple)); - } - else - return Source + (-Source % Multiple); - } - }; - - template - struct compute_floorMultiple{}; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return Source - std::fmod(Source, Multiple); - else - return Source - std::fmod(Source, Multiple) - Multiple; - } - }; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return Source - Source % Multiple; - else - { - genType Tmp = Source + genType(1); - return Tmp - Tmp % Multiple - Multiple; - } - } - }; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return 
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER bool isPowerOfTwo(genIUType Value)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'isPowerOfTwo' only accept integer inputs");
-
-		genIUType const Result = glm::abs(Value);
-		return !(Result & (Result - 1));
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType nextPowerOfTwo(genIUType value)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'nextPowerOfTwo' only accept integer inputs");
-
-		return detail::compute_ceilPowerOfTwo<1, genIUType, defaultp, std::numeric_limits<genIUType>::is_signed>::call(vec<1, genIUType, defaultp>(value)).x;
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType prevPowerOfTwo(genIUType value)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'prevPowerOfTwo' only accept integer inputs");
-
-		return isPowerOfTwo(value) ? value : static_cast<genIUType>(static_cast<genIUType>(1) << static_cast<genIUType>(findMSB(value)));
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER bool isMultiple(genIUType Value, genIUType Multiple)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'isMultiple' only accept integer inputs");
-
-		return isMultiple(vec<1, genIUType>(Value), vec<1, genIUType>(Multiple)).x;
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType nextMultiple(genIUType Source, genIUType Multiple)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'nextMultiple' only accept integer inputs");
-
-		return detail::compute_ceilMultiple<std::numeric_limits<genIUType>::is_iec559, std::numeric_limits<genIUType>::is_signed>::call(Source, Multiple);
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType prevMultiple(genIUType Source, genIUType Multiple)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'prevMultiple' only accept integer inputs");
-
-		return detail::compute_floorMultiple<std::numeric_limits<genIUType>::is_iec559, std::numeric_limits<genIUType>::is_signed>::call(Source, Multiple);
-	}
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER int findNSB(genIUType x, int significantBitCount)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genIUType>::is_integer, "'findNSB' only accept integer inputs");
-
-		if(bitCount(x) < significantBitCount)
-			return -1;
-
-		genIUType const One = static_cast<genIUType>(1);
-		int bitPos = 0;
-
-		genIUType key = x;
-		int nBitCount = significantBitCount;
-		int Step = sizeof(x) * 8 / 2;
-		while (key > One)
-		{
-			genIUType Mask = static_cast<genIUType>((One << Step) - One);
-			genIUType currentKey = key & Mask;
-			int currentBitCount = bitCount(currentKey);
-			if (nBitCount > currentBitCount)
-			{
-				nBitCount -= currentBitCount;
-				bitPos += Step;
-				key >>= static_cast<genIUType>(Step);
-			}
-			else
-			{
-				key = key & Mask;
-			}
-
-			Step >>= 1;
-		}
-
-		return static_cast<int>(bitPos);
-	}
-}//namespace glm
diff --git a/third_party/glm/ext/scalar_relational.hpp b/third_party/glm/ext/scalar_relational.hpp
deleted file mode 100755
index 3076a5e..0000000
--- a/third_party/glm/ext/scalar_relational.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/// @ref ext_scalar_relational
-/// @file glm/ext/scalar_relational.hpp
-///
-/// @defgroup ext_scalar_relational GLM_EXT_scalar_relational
-/// @ingroup ext
-///
-/// Exposes comparison functions for scalar types that take a user defined epsilon values.
-///
-/// Include <glm/ext/scalar_relational.hpp> to use the features of this extension.
-///
-/// @see core_vector_relational
-/// @see ext_vector_relational
-/// @see ext_matrix_relational
-
-#pragma once
-
-// Dependencies
-#include "../detail/qualifier.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_scalar_relational extension included")
-#endif
-
-namespace glm
-{
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is satisfied.
-	///
-	/// @tparam genType Floating-point or integer scalar types
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon);
-
-	/// Returns the component-wise comparison of |x - y| >= epsilon.
-	/// True if this expression is not satisfied.
-	///
-	/// @tparam genType Floating-point or integer scalar types
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon);
-
-	/// Returns the component-wise comparison between two scalars in term of ULPs.
-	/// True if this expression is satisfied.
-	///
-	/// @param x First operand.
-	/// @param y Second operand.
-	/// @param ULPs Maximum difference in ULPs between the two operators to consider them equal.
-	///
-	/// @tparam genType Floating-point or integer scalar types
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int ULPs);
-
-	/// Returns the component-wise comparison between two scalars in term of ULPs.
-	/// True if this expression is not satisfied.
-	///
-	/// @param x First operand.
-	/// @param y Second operand.
-	/// @param ULPs Maximum difference in ULPs between the two operators to consider them not equal.
-	///
-	/// @tparam genType Floating-point or integer scalar types
-	template<typename genType>
-	GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs);
-
-	/// @}
-}//namespace glm
-
-#include "scalar_relational.inl"
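Note: the two overload families above compare either against an absolute epsilon or
against a ULP budget. A short sketch of the difference (assumes GLM headers on the
include path; not part of the patch):

    #include <glm/ext/scalar_relational.hpp>
    #include <cassert>

    int main()
    {
        double const sum = 0.1 + 0.2;        // 0.30000000000000004, not 0.3
        assert(sum != 0.3);                  // exact comparison fails
        assert(glm::equal(sum, 0.3, 1e-9));  // epsilon overload: |x - y| <= 1e-9
        assert(glm::equal(sum, 0.3, 1));     // ULP overload: the two doubles are adjacent
        return 0;
    }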
diff --git a/third_party/glm/ext/scalar_relational.inl b/third_party/glm/ext/scalar_relational.inl
deleted file mode 100755
index c85583e..0000000
--- a/third_party/glm/ext/scalar_relational.inl
+++ /dev/null
@@ -1,40 +0,0 @@
-#include "../common.hpp"
-#include "../ext/scalar_int_sized.hpp"
-#include "../ext/scalar_uint_sized.hpp"
-#include "../detail/type_float.hpp"
-
-namespace glm
-{
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon)
-	{
-		return abs(x - y) <= epsilon;
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon)
-	{
-		return abs(x - y) > epsilon;
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int MaxULPs)
-	{
-		detail::float_t<genType> const a(x);
-		detail::float_t<genType> const b(y);
-
-		// Different signs means they do not match.
-		if(a.negative() != b.negative())
-			return false;
-
-		// Find the difference in ULPs.
-		typename detail::float_t<genType>::int_type const DiffULPs = abs(a.i - b.i);
-		return DiffULPs <= MaxULPs;
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs)
-	{
-		return !equal(x, y, ULPs);
-	}
-}//namespace glm
diff --git a/third_party/glm/ext/scalar_uint_sized.hpp b/third_party/glm/ext/scalar_uint_sized.hpp
deleted file mode 100755
index fd5267f..0000000
--- a/third_party/glm/ext/scalar_uint_sized.hpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/// @ref ext_scalar_uint_sized
-/// @file glm/ext/scalar_uint_sized.hpp
-///
-/// @defgroup ext_scalar_uint_sized GLM_EXT_scalar_uint_sized
-/// @ingroup ext
-///
-/// Exposes sized unsigned integer scalar types.
-///
-/// Include <glm/ext/scalar_uint_sized.hpp> to use the features of this extension.
-///
-/// @see ext_scalar_int_sized
-
-#pragma once
-
-#include "../detail/setup.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_scalar_uint_sized extension included")
-#endif
-
-namespace glm{
-namespace detail
-{
-#	if GLM_HAS_EXTENDED_INTEGER_TYPE
-		typedef std::uint8_t		uint8;
-		typedef std::uint16_t		uint16;
-		typedef std::uint32_t		uint32;
-#	else
-		typedef unsigned char		uint8;
-		typedef unsigned short		uint16;
-		typedef unsigned int		uint32;
-#endif
-
-	template<>
-	struct is_int<uint8>
-	{
-		enum test {value = ~0};
-	};
-
-	template<>
-	struct is_int<uint16>
-	{
-		enum test {value = ~0};
-	};
-
-	template<>
-	struct is_int<uint64>
-	{
-		enum test {value = ~0};
-	};
-}//namespace detail
-
-
-	/// @addtogroup ext_scalar_uint_sized
-	/// @{
-
-	/// 8 bit unsigned integer type.
-	typedef detail::uint8		uint8;
-
-	/// 16 bit unsigned integer type.
-	typedef detail::uint16		uint16;
-
-	/// 32 bit unsigned integer type.
-	typedef detail::uint32		uint32;
-
-	/// 64 bit unsigned integer type.
-	typedef detail::uint64		uint64;
-
-	/// @}
-}//namespace glm
diff --git a/third_party/glm/ext/scalar_ulp.hpp b/third_party/glm/ext/scalar_ulp.hpp
deleted file mode 100755
index 941ada3..0000000
--- a/third_party/glm/ext/scalar_ulp.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/// @ref ext_scalar_ulp
-/// @file glm/ext/scalar_ulp.hpp
-///
-/// @defgroup ext_scalar_ulp GLM_EXT_scalar_ulp
-/// @ingroup ext
-///
-/// Allow the measurement of the accuracy of a function against a reference
-/// implementation. This extension works on floating-point data and provide results
-/// in ULP.
-///
-/// Include <glm/ext/scalar_ulp.hpp> to use the features of this extension.
-///
-/// @see ext_vector_ulp
-/// @see ext_scalar_relational
-
-#pragma once
-
-// Dependencies
-#include "../ext/scalar_int_sized.hpp"
-#include "../common.hpp"
-#include "../detail/qualifier.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_scalar_ulp extension included")
-#endif
-
-namespace glm
-{
-	/// Return the next ULP value(s) after the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see ext_scalar_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType nextFloat(genType x);
-
-	/// Return the previous ULP value(s) before the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see ext_scalar_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType prevFloat(genType x);
-
-	/// Return the value(s) ULP distance after the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see ext_scalar_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType nextFloat(genType x, int ULPs);
-
-	/// Return the value(s) ULP distance before the input value(s).
- /// - /// @tparam genType A floating-point scalar type. - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL genType prevFloat(genType x, int ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @see ext_scalar_ulp - GLM_FUNC_DECL int floatDistance(float x, float y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. - /// - /// @see ext_scalar_ulp - GLM_FUNC_DECL int64 floatDistance(double x, double y); - - /// @} -}//namespace glm - -#include "scalar_ulp.inl" diff --git a/third_party/glm/ext/scalar_ulp.inl b/third_party/glm/ext/scalar_ulp.inl deleted file mode 100755 index 308df15..0000000 --- a/third_party/glm/ext/scalar_ulp.inl +++ /dev/null @@ -1,284 +0,0 @@ -/// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. -/// -/// Developed at SunPro, a Sun Microsystems, Inc. business. -/// Permission to use, copy, modify, and distribute this -/// software is freely granted, provided that this notice -/// is preserved. - -#include "../detail/type_float.hpp" -#include "../ext/scalar_constants.hpp" -#include -#include - -#if(GLM_COMPILER & GLM_COMPILER_VC) -# pragma warning(push) -# pragma warning(disable : 4127) -#endif - -typedef union -{ - float value; - /* FIXME: Assumes 32 bit int. */ - unsigned int word; -} ieee_float_shape_type; - -typedef union -{ - double value; - struct - { - int lsw; - int msw; - } parts; -} ieee_double_shape_type; - -#define GLM_EXTRACT_WORDS(ix0,ix1,d) \ - do { \ - ieee_double_shape_type ew_u; \ - ew_u.value = (d); \ - (ix0) = ew_u.parts.msw; \ - (ix1) = ew_u.parts.lsw; \ - } while (0) - -#define GLM_GET_FLOAT_WORD(i,d) \ - do { \ - ieee_float_shape_type gf_u; \ - gf_u.value = (d); \ - (i) = gf_u.word; \ - } while (0) - -#define GLM_SET_FLOAT_WORD(d,i) \ - do { \ - ieee_float_shape_type sf_u; \ - sf_u.word = (i); \ - (d) = sf_u.value; \ - } while (0) - -#define GLM_INSERT_WORDS(d,ix0,ix1) \ - do { \ - ieee_double_shape_type iw_u; \ - iw_u.parts.msw = (ix0); \ - iw_u.parts.lsw = (ix1); \ - (d) = iw_u.value; \ - } while (0) - -namespace glm{ -namespace detail -{ - GLM_FUNC_QUALIFIER float nextafterf(float x, float y) - { - volatile float t; - int hx, hy, ix, iy; - - GLM_GET_FLOAT_WORD(hx, x); - GLM_GET_FLOAT_WORD(hy, y); - ix = hx & 0x7fffffff; // |x| - iy = hy & 0x7fffffff; // |y| - - if((ix > 0x7f800000) || // x is nan - (iy > 0x7f800000)) // y is nan - return x + y; - if(abs(y - x) <= epsilon()) - return y; // x=y, return y - if(ix == 0) - { // x == 0 - GLM_SET_FLOAT_WORD(x, (hy & 0x80000000) | 1);// return +-minsubnormal - t = x * x; - if(abs(t - x) <= epsilon()) - return t; - else - return x; // raise underflow flag - } - if(hx >= 0) - { // x > 0 - if(hx > hy) // x > y, x -= ulp - hx -= 1; - else // x < y, x += ulp - hx += 1; - } - else - { // x < 0 - if(hy >= 0 || hx > hy) // x < y, x -= ulp - hx -= 1; - else // x > y, x += ulp - hx += 1; - } - hy = hx & 0x7f800000; - if(hy >= 0x7f800000) - return x + x; // overflow - if(hy < 0x00800000) // underflow - { - t = x * x; - if(abs(t - x) > epsilon()) - { // raise underflow flag - GLM_SET_FLOAT_WORD(y, hx); - return y; - } - } - GLM_SET_FLOAT_WORD(x, hx); - return x; - } - - GLM_FUNC_QUALIFIER double nextafter(double x, double y) - { - volatile double t; - int hx, hy, ix, iy; - unsigned int lx, ly; - - GLM_EXTRACT_WORDS(hx, lx, x); - GLM_EXTRACT_WORDS(hy, ly, y); - ix = hx & 0x7fffffff; // |x| - iy = hy & 0x7fffffff; // |y| - - if(((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 
0) || // x is nan - ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) // y is nan - return x + y; - if(abs(y - x) <= epsilon()) - return y; // x=y, return y - if((ix | lx) == 0) - { // x == 0 - GLM_INSERT_WORDS(x, hy & 0x80000000, 1); // return +-minsubnormal - t = x * x; - if(abs(t - x) <= epsilon()) - return t; - else - return x; // raise underflow flag - } - if(hx >= 0) { // x > 0 - if(hx > hy || ((hx == hy) && (lx > ly))) { // x > y, x -= ulp - if(lx == 0) hx -= 1; - lx -= 1; - } - else { // x < y, x += ulp - lx += 1; - if(lx == 0) hx += 1; - } - } - else { // x < 0 - if(hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))){// x < y, x -= ulp - if(lx == 0) hx -= 1; - lx -= 1; - } - else { // x > y, x += ulp - lx += 1; - if(lx == 0) hx += 1; - } - } - hy = hx & 0x7ff00000; - if(hy >= 0x7ff00000) - return x + x; // overflow - if(hy < 0x00100000) - { // underflow - t = x * x; - if(abs(t - x) > epsilon()) - { // raise underflow flag - GLM_INSERT_WORDS(y, hx, lx); - return y; - } - } - GLM_INSERT_WORDS(x, hx, lx); - return x; - } -}//namespace detail -}//namespace glm - -#if(GLM_COMPILER & GLM_COMPILER_VC) -# pragma warning(pop) -#endif - -namespace glm -{ - template<> - GLM_FUNC_QUALIFIER float nextFloat(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MAX); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MAX); -# else - return nextafterf(x, FLT_MAX); -# endif - } - - template<> - GLM_FUNC_QUALIFIER double nextFloat(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafter(x, std::numeric_limits::max()); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MAX); -# else - return nextafter(x, DBL_MAX); -# endif - } - - template - GLM_FUNC_QUALIFIER T nextFloat(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'next_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for(int i = 0; i < ULPs; ++i) - temp = nextFloat(temp); - return temp; - } - - GLM_FUNC_QUALIFIER float prevFloat(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MIN); -# else - return nextafterf(x, FLT_MIN); -# endif - } - - GLM_FUNC_QUALIFIER double prevFloat(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return _nextafter(x, DBL_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MIN); -# else - return nextafter(x, DBL_MIN); -# endif - } - - template - GLM_FUNC_QUALIFIER T prevFloat(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'prev_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for(int i = 0; i < ULPs; ++i) - temp = prevFloat(temp); - return temp; - } - - GLM_FUNC_QUALIFIER int 
floatDistance(float x, float y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } - - GLM_FUNC_QUALIFIER int64 floatDistance(double x, double y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_bool1.hpp b/third_party/glm/ext/vector_bool1.hpp deleted file mode 100755 index 002c320..0000000 --- a/third_party/glm/ext/vector_bool1.hpp +++ /dev/null @@ -1,30 +0,0 @@ -/// @ref ext_vector_bool1 -/// @file glm/ext/vector_bool1.hpp -/// -/// @defgroup ext_vector_bool1 GLM_EXT_vector_bool1 -/// @ingroup ext -/// -/// Exposes bvec1 vector type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_bool1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_bool1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_bool1 - /// @{ - - /// 1 components vector of boolean. - typedef vec<1, bool, defaultp> bvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool1_precision.hpp b/third_party/glm/ext/vector_bool1_precision.hpp deleted file mode 100755 index e62d3cf..0000000 --- a/third_party/glm/ext/vector_bool1_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref ext_vector_bool1_precision -/// @file glm/ext/vector_bool1_precision.hpp -/// -/// @defgroup ext_vector_bool1_precision GLM_EXT_vector_bool1_precision -/// @ingroup ext -/// -/// Exposes highp_bvec1, mediump_bvec1 and lowp_bvec1 types. -/// -/// Include to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_bool1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_bool1_precision - /// @{ - - /// 1 component vector of bool values. - typedef vec<1, bool, highp> highp_bvec1; - - /// 1 component vector of bool values. - typedef vec<1, bool, mediump> mediump_bvec1; - - /// 1 component vector of bool values. - typedef vec<1, bool, lowp> lowp_bvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool2.hpp b/third_party/glm/ext/vector_bool2.hpp deleted file mode 100755 index 52288b7..0000000 --- a/third_party/glm/ext/vector_bool2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, bool, defaultp> bvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool2_precision.hpp b/third_party/glm/ext/vector_bool2_precision.hpp deleted file mode 100755 index 4370933..0000000 --- a/third_party/glm/ext/vector_bool2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, highp> highp_bvec2; - - /// 2 components vector of medium qualifier bool numbers. 
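Note: the GLM_EXT_scalar_ulp implementation deleted above steps a float to its
neighbouring representable values and counts the representable values between two
floats; as deleted here, its prevFloat overloads step toward FLT_MIN /
std::numeric_limits<T>::min(), the smallest positive normal value, rather than the
most negative float. The standard library provides the same stepping primitive; a
short sketch (std::nextafterf is standard C++, the rest is illustration):

    #include <cassert>
    #include <cmath>

    int main()
    {
        float const x  = 1.0f;
        float const up = std::nextafterf(x, 2.0f); // next representable float above 1.0f
        assert(up > x);
        assert(std::nextafterf(up, 0.0f) == x);    // one step back returns x,
        return 0;                                  // i.e. a ULP distance of 1
    }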
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, mediump> mediump_bvec2; - - /// 2 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, lowp> lowp_bvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool3.hpp b/third_party/glm/ext/vector_bool3.hpp deleted file mode 100755 index 90a0b7e..0000000 --- a/third_party/glm/ext/vector_bool3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, bool, defaultp> bvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool3_precision.hpp b/third_party/glm/ext/vector_bool3_precision.hpp deleted file mode 100755 index 89cd2d3..0000000 --- a/third_party/glm/ext/vector_bool3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, highp> highp_bvec3; - - /// 3 components vector of medium qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, mediump> mediump_bvec3; - - /// 3 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, lowp> lowp_bvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool4.hpp b/third_party/glm/ext/vector_bool4.hpp deleted file mode 100755 index 18aa71b..0000000 --- a/third_party/glm/ext/vector_bool4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, bool, defaultp> bvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool4_precision.hpp b/third_party/glm/ext/vector_bool4_precision.hpp deleted file mode 100755 index 79786e5..0000000 --- a/third_party/glm/ext/vector_bool4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, highp> highp_bvec4; - - /// 4 components vector of medium qualifier bool numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, mediump> mediump_bvec4; - - /// 4 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, lowp> lowp_bvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_common.hpp b/third_party/glm/ext/vector_common.hpp deleted file mode 100755 index 324fe1c..0000000 --- a/third_party/glm/ext/vector_common.hpp +++ /dev/null @@ -1,144 +0,0 @@ -/// @ref ext_vector_common -/// @file glm/ext/vector_common.hpp -/// -/// @defgroup ext_vector_common GLM_EXT_vector_common -/// @ingroup ext -/// -/// Exposes min and max functions for 3 to 4 vector parameters. -/// -/// Include to use the features of this extension. -/// -/// @see core_common -/// @see ext_scalar_common - -#pragma once - -// Dependency: -#include "../ext/scalar_common.hpp" -#include "../common.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_common - /// @{ - - /// Return the minimum component-wise values of 3 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c); - - /// Return the minimum component-wise values of 4 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c, vec const& d); - - /// Return the maximum component-wise values of 3 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z); - - /// Return the maximum component-wise values of 4 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max( vec const& x, vec const& y, vec const& z, vec const& w); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& x, T y); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. 
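Note: the fmin/fmax overloads declared here inherit the std::fmin/std::fmax NaN
handling described above: when exactly one argument is NaN, the other argument is
returned. A scalar illustration of that semantic (standard C++ only, not GLM code):

    #include <cassert>
    #include <cmath>

    int main()
    {
        float const nan = std::nanf("");
        assert(std::fmin(nan, 2.0f) == 2.0f);  // NaN operand is ignored
        assert(std::fmax(1.0f, nan) == 1.0f);
        assert(std::fmin(1.0f, 2.0f) == 1.0f); // ordinary case
        return 0;
    }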
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& x, vec const& y); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c, vec const& d); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, T b); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c, vec const& d); - - /// @} -}//namespace glm - -#include "vector_common.inl" diff --git a/third_party/glm/ext/vector_common.inl b/third_party/glm/ext/vector_common.inl deleted file mode 100755 index 71f3809..0000000 --- a/third_party/glm/ext/vector_common.inl +++ /dev/null @@ -1,88 +0,0 @@ -#include "../detail/_vectorize.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'min' only accept floating-point or integer inputs"); - return glm::min(glm::min(x, y), z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z, vec const& w) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'min' only accept floating-point or integer inputs"); - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'max' only accept floating-point or integer inputs"); - return glm::max(glm::max(x, y), z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z, vec const& w) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'max' only accept floating-point or integer inputs"); - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return detail::functor2::call(fmin, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return detail::functor2::call(fmin, a, b); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return fmin(fmin(a, b), c); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c, vec const& d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return fmin(fmin(a, b), fmin(c, d)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return detail::functor2::call(fmax, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return detail::functor2::call(fmax, a, b); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return fmax(fmax(a, b), c); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c, vec const& d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return fmax(fmax(a, b), fmax(c, d)); - } -}//namespace glm diff --git 
a/third_party/glm/ext/vector_double1.hpp b/third_party/glm/ext/vector_double1.hpp deleted file mode 100755 index 3882667..0000000 --- a/third_party/glm/ext/vector_double1.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref ext_vector_double1 -/// @file glm/ext/vector_double1.hpp -/// -/// @defgroup ext_vector_double1 GLM_EXT_vector_double1 -/// @ingroup ext -/// -/// Exposes double-precision floating point vector type with one component. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_double1_precision extension. -/// @see ext_vector_float1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_double1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_double1 - /// @{ - - /// 1 components vector of double-precision floating-point numbers. - typedef vec<1, double, defaultp> dvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double1_precision.hpp b/third_party/glm/ext/vector_double1_precision.hpp deleted file mode 100755 index 1d47195..0000000 --- a/third_party/glm/ext/vector_double1_precision.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_vector_double1_precision -/// @file glm/ext/vector_double1_precision.hpp -/// -/// @defgroup ext_vector_double1_precision GLM_EXT_vector_double1_precision -/// @ingroup ext -/// -/// Exposes highp_dvec1, mediump_dvec1 and lowp_dvec1 types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_double1 - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_double1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_double1_precision - /// @{ - - /// 1 component vector of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, highp> highp_dvec1; - - /// 1 component vector of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, mediump> mediump_dvec1; - - /// 1 component vector of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, lowp> lowp_dvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double2.hpp b/third_party/glm/ext/vector_double2.hpp deleted file mode 100755 index 60e3577..0000000 --- a/third_party/glm/ext/vector_double2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, double, defaultp> dvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double2_precision.hpp b/third_party/glm/ext/vector_double2_precision.hpp deleted file mode 100755 index fa53940..0000000 --- a/third_party/glm/ext/vector_double2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high double-qualifier floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, highp> highp_dvec2; - - /// 2 components vector of medium double-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, mediump> mediump_dvec2; - - /// 2 components vector of low double-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, lowp> lowp_dvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double3.hpp b/third_party/glm/ext/vector_double3.hpp deleted file mode 100755 index 6dfe4c6..0000000 --- a/third_party/glm/ext/vector_double3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, double, defaultp> dvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double3_precision.hpp b/third_party/glm/ext/vector_double3_precision.hpp deleted file mode 100755 index a8cfa37..0000000 --- a/third_party/glm/ext/vector_double3_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, highp> highp_dvec3; - - /// 3 components vector of medium double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, mediump> mediump_dvec3; - - /// 3 components vector of low double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, lowp> lowp_dvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double4.hpp b/third_party/glm/ext/vector_double4.hpp deleted file mode 100755 index 87f225f..0000000 --- a/third_party/glm/ext/vector_double4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, double, defaultp> dvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double4_precision.hpp b/third_party/glm/ext/vector_double4_precision.hpp deleted file mode 100755 index 09cafa1..0000000 --- a/third_party/glm/ext/vector_double4_precision.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double4_precision.hpp - -#pragma once -#include "../detail/setup.hpp" -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, highp> highp_dvec4; - - /// 4 components vector of medium double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, mediump> mediump_dvec4; - - /// 4 components vector of low double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, lowp> lowp_dvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float1.hpp b/third_party/glm/ext/vector_float1.hpp deleted file mode 100755 index 28acc2c..0000000 --- a/third_party/glm/ext/vector_float1.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref ext_vector_float1 -/// @file glm/ext/vector_float1.hpp -/// -/// @defgroup ext_vector_float1 GLM_EXT_vector_float1 -/// @ingroup ext -/// -/// Exposes single-precision floating point vector type with one component. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_float1_precision extension. -/// @see ext_vector_double1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_float1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_float1 - /// @{ - - /// 1 components vector of single-precision floating-point numbers. - typedef vec<1, float, defaultp> vec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float1_precision.hpp b/third_party/glm/ext/vector_float1_precision.hpp deleted file mode 100755 index 6e8dad8..0000000 --- a/third_party/glm/ext/vector_float1_precision.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_vector_float1_precision -/// @file glm/ext/vector_float1_precision.hpp -/// -/// @defgroup ext_vector_float1_precision GLM_EXT_vector_float1_precision -/// @ingroup ext -/// -/// Exposes highp_vec1, mediump_vec1 and lowp_vec1 types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_float1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_float1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_float1_precision - /// @{ - - /// 1 component vector of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef vec<1, float, highp> highp_vec1; - - /// 1 component vector of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, mediump> mediump_vec1; - - /// 1 component vector of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, lowp> lowp_vec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float2.hpp b/third_party/glm/ext/vector_float2.hpp deleted file mode 100755 index d31545d..0000000 --- a/third_party/glm/ext/vector_float2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, float, defaultp> vec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float2_precision.hpp b/third_party/glm/ext/vector_float2_precision.hpp deleted file mode 100755 index 23c0820..0000000 --- a/third_party/glm/ext/vector_float2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, highp> highp_vec2; - - /// 2 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, mediump> mediump_vec2; - - /// 2 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, lowp> lowp_vec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float3.hpp b/third_party/glm/ext/vector_float3.hpp deleted file mode 100755 index cd79a62..0000000 --- a/third_party/glm/ext/vector_float3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, float, defaultp> vec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float3_precision.hpp b/third_party/glm/ext/vector_float3_precision.hpp deleted file mode 100755 index be640b5..0000000 --- a/third_party/glm/ext/vector_float3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high single-qualifier floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, highp> highp_vec3; - - /// 3 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, mediump> mediump_vec3; - - /// 3 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, lowp> lowp_vec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float4.hpp b/third_party/glm/ext/vector_float4.hpp deleted file mode 100755 index d84adcc..0000000 --- a/third_party/glm/ext/vector_float4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, float, defaultp> vec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float4_precision.hpp b/third_party/glm/ext/vector_float4_precision.hpp deleted file mode 100755 index aede838..0000000 --- a/third_party/glm/ext/vector_float4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, highp> highp_vec4; - - /// 4 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, mediump> mediump_vec4; - - /// 4 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, lowp> lowp_vec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int1.hpp b/third_party/glm/ext/vector_int1.hpp deleted file mode 100755 index dc86038..0000000 --- a/third_party/glm/ext/vector_int1.hpp +++ /dev/null @@ -1,32 +0,0 @@ -/// @ref ext_vector_int1 -/// @file glm/ext/vector_int1.hpp -/// -/// @defgroup ext_vector_int1 GLM_EXT_vector_int1 -/// @ingroup ext -/// -/// Exposes ivec1 vector type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_uint1 extension. -/// @see ext_vector_int1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_int1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_int1 - /// @{ - - /// 1 component vector of signed integer numbers. 
- typedef vec<1, int, defaultp> ivec1; - - /// @} -}//namespace glm - diff --git a/third_party/glm/ext/vector_int1_precision.hpp b/third_party/glm/ext/vector_int1_precision.hpp deleted file mode 100755 index 3323954..0000000 --- a/third_party/glm/ext/vector_int1_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref ext_vector_int1_precision -/// @file glm/ext/vector_int1_precision.hpp -/// -/// @defgroup ext_vector_int1_precision GLM_EXT_vector_int1_precision -/// @ingroup ext -/// -/// Exposes highp_ivec1, mediump_ivec1 and lowp_ivec1 types. -/// -/// Include to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_int1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_int1_precision - /// @{ - - /// 1 component vector of signed integer values. - typedef vec<1, int, highp> highp_ivec1; - - /// 1 component vector of signed integer values. - typedef vec<1, int, mediump> mediump_ivec1; - - /// 1 component vector of signed integer values. - typedef vec<1, int, lowp> lowp_ivec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int2.hpp b/third_party/glm/ext/vector_int2.hpp deleted file mode 100755 index aef803e..0000000 --- a/third_party/glm/ext/vector_int2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, int, defaultp> ivec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int2_precision.hpp b/third_party/glm/ext/vector_int2_precision.hpp deleted file mode 100755 index 97315fc..0000000 --- a/third_party/glm/ext/vector_int2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, highp> highp_ivec2; - - /// 2 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, mediump> mediump_ivec2; - - /// 2 components vector of low qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, lowp> lowp_ivec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int3.hpp b/third_party/glm/ext/vector_int3.hpp deleted file mode 100755 index 4767e61..0000000 --- a/third_party/glm/ext/vector_int3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of signed integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, int, defaultp> ivec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int3_precision.hpp b/third_party/glm/ext/vector_int3_precision.hpp deleted file mode 100755 index 2cd3f5f..0000000 --- a/third_party/glm/ext/vector_int3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, highp> highp_ivec3; - - /// 3 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, mediump> mediump_ivec3; - - /// 3 components vector of low qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, lowp> lowp_ivec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int4.hpp b/third_party/glm/ext/vector_int4.hpp deleted file mode 100755 index bb23adf..0000000 --- a/third_party/glm/ext/vector_int4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, int, defaultp> ivec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int4_precision.hpp b/third_party/glm/ext/vector_int4_precision.hpp deleted file mode 100755 index 4fcd791..0000000 --- a/third_party/glm/ext/vector_int4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, highp> highp_ivec4; - - /// 4 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, mediump> mediump_ivec4; - - /// 4 components vector of low qualifier signed integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, lowp> lowp_ivec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_integer.hpp b/third_party/glm/ext/vector_integer.hpp deleted file mode 100755 index 1304dd8..0000000 --- a/third_party/glm/ext/vector_integer.hpp +++ /dev/null @@ -1,149 +0,0 @@ -/// @ref ext_vector_integer -/// @file glm/ext/vector_integer.hpp -/// -/// @see core (dependence) -/// @see ext_vector_integer (dependence) -/// -/// @defgroup ext_vector_integer GLM_EXT_vector_integer -/// @ingroup ext -/// -/// Include <glm/ext/vector_integer.hpp> to use the features of this extension. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../vector_relational.hpp" -#include "../common.hpp" -#include <limits> - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_integer - /// @{ - - /// Return true if the value is a power of two number. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, bool, Q> isPowerOfTwo(vec<L, T, Q> const& v); - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextPowerOfTwo(vec<L, T, Q> const& v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, T Multiple); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, bool, Q> isMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple); - - /// Higher multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, T Multiple); - - /// Higher multiple number of Source.
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, T Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple); - - /// Returns the bit number of the Nth significant bit set to - /// 1 in the binary representation of value. - /// If value bitcount is less than the Nth significant bit, -1 will be returned. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar types. - /// - /// @see ext_vector_integer - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount); - - /// @} -} //namespace glm - -#include "vector_integer.inl" diff --git a/third_party/glm/ext/vector_integer.inl b/third_party/glm/ext/vector_integer.inl deleted file mode 100755 index 939ff5e..0000000 --- a/third_party/glm/ext/vector_integer.inl +++ /dev/null @@ -1,85 +0,0 @@ -#include "scalar_integer.hpp" - -namespace glm -{ - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, bool, Q> isPowerOfTwo(vec<L, T, Q> const& Value) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isPowerOfTwo' only accept integer inputs"); - - vec<L, T, Q> const Result(abs(Value)); - return equal(Result & (Result - vec<L, T, Q>(1)), vec<L, T, Q>(0)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextPowerOfTwo(vec<L, T, Q> const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextPowerOfTwo' only accept integer inputs"); - - return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevPowerOfTwo(vec<L, T, Q> const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevPowerOfTwo' only accept integer inputs"); - - return detail::functor1<vec, L, T, T, Q>::call(prevPowerOfTwo, v); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs"); - - return (Value % Multiple) == vec<L, T, Q>(0); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, bool, Q> isMultiple(vec<L, T, Q> const& Value, vec<L, T, Q> const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'isMultiple' only accept integer inputs"); - - return (Value % Multiple) == vec<L, T, Q>(0); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs"); -
- return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, vec<L, T, Q>(Multiple)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'nextMultiple' only accept integer inputs"); - - return detail::functor2<vec, L, T, Q>::call(nextMultiple, Source, Multiple); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs"); - - return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, vec<L, T, Q>(Multiple)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'prevMultiple' only accept integer inputs"); - - return detail::functor2<vec, L, T, Q>::call(prevMultiple, Source, Multiple); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, int, Q> findNSB(vec<L, T, Q> const& Source, vec<L, int, Q> SignificantBitCount) - { - GLM_STATIC_ASSERT(std::numeric_limits<T>::is_integer, "'findNSB' only accept integer inputs"); - - return detail::functor2_vec_int<L, T, Q>::call(findNSB, Source, SignificantBitCount); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_relational.hpp b/third_party/glm/ext/vector_relational.hpp deleted file mode 100755 index 1c2367d..0000000 --- a/third_party/glm/ext/vector_relational.hpp +++ /dev/null @@ -1,107 +0,0 @@ -/// @ref ext_vector_relational -/// @file glm/ext/vector_relational.hpp -/// -/// @see core (dependence) -/// @see ext_scalar_integer (dependence) -/// -/// @defgroup ext_vector_relational GLM_EXT_vector_relational -/// @ingroup ext -/// -/// Exposes comparison functions for vector types that take a user defined epsilon values. -/// -/// Include <glm/ext/vector_relational.hpp> to use the features of this extension. -/// -/// @see core_vector_relational -/// @see ext_scalar_relational -/// @see ext_matrix_relational - -#pragma once - -// Dependencies -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_relational extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_relational - /// @{ - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied.
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& ULPs); - - /// @} -}//namespace glm - -#include "vector_relational.inl" diff --git a/third_party/glm/ext/vector_relational.inl b/third_party/glm/ext/vector_relational.inl deleted file mode 100755 index 7a39ab5..0000000 --- a/third_party/glm/ext/vector_relational.inl +++ /dev/null @@ -1,75 +0,0 @@ -#include "../vector_relational.hpp" -#include "../common.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/type_float.hpp" - -namespace glm -{ - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T Epsilon) - { - return equal(x, y, vec<L, T, Q>(Epsilon)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& Epsilon) - { - return lessThanEqual(abs(x - y), Epsilon); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T Epsilon) - { - return notEqual(x, y, vec<L, T, Q>(Epsilon)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& Epsilon) - { - return greaterThan(abs(x - y), Epsilon); - } - - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int MaxULPs) - { - return equal(x, y, vec<L, int, Q>(MaxULPs)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> equal(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& MaxULPs) - { - vec<L, bool, Q> Result(false); - for(length_t i = 0; i < L; ++i) - { - detail::float_t<T> const a(x[i]); - detail::float_t<T> const b(y[i]); - - // Different signs means they do not match.
- if(a.negative() != b.negative()) - { - // Check for equality to make sure +0==-0 - Result[i] = a.mantissa() == b.mantissa() && a.exponent() == b.exponent(); - } - else - { - // Find the difference in ULPs. - typename detail::float_t<T>::int_type const DiffULPs = abs(a.i - b.i); - Result[i] = DiffULPs <= MaxULPs[i]; - } - } - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, int MaxULPs) - { - return notEqual(x, y, vec<L, int, Q>(MaxULPs)); - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<L, bool, Q> notEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, int, Q> const& MaxULPs) - { - return not_(equal(x, y, MaxULPs)); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_uint1.hpp b/third_party/glm/ext/vector_uint1.hpp deleted file mode 100755 index eb8a704..0000000 --- a/third_party/glm/ext/vector_uint1.hpp +++ /dev/null @@ -1,32 +0,0 @@ -/// @ref ext_vector_uint1 -/// @file glm/ext/vector_uint1.hpp -/// -/// @defgroup ext_vector_uint1 GLM_EXT_vector_uint1 -/// @ingroup ext -/// -/// Exposes uvec1 vector type. -/// -/// Include <glm/ext/vector_uint1.hpp> to use the features of this extension. -/// -/// @see ext_vector_int1 extension. -/// @see ext_vector_uint1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_uint1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_uint1 - /// @{ - - /// 1 component vector of unsigned integer numbers. - typedef vec<1, unsigned int, defaultp> uvec1; - - /// @} -}//namespace glm - diff --git a/third_party/glm/ext/vector_uint1_precision.hpp b/third_party/glm/ext/vector_uint1_precision.hpp deleted file mode 100755 index 30daa5b..0000000 --- a/third_party/glm/ext/vector_uint1_precision.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/// @ref ext_vector_uint1_precision -/// @file glm/ext/vector_uint1_precision.hpp -/// -/// @defgroup ext_vector_uint1_precision GLM_EXT_vector_uint1_precision -/// @ingroup ext -/// -/// Exposes highp_uvec1, mediump_uvec1 and lowp_uvec1 types. -/// -/// Include <glm/ext/vector_uint1_precision.hpp> to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_uint1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_uint1_precision - /// @{ - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, highp> highp_uvec1; - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, mediump> mediump_uvec1; - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, lowp> lowp_uvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint2.hpp b/third_party/glm/ext/vector_uint2.hpp deleted file mode 100755 index 03c00f5..0000000 --- a/third_party/glm/ext/vector_uint2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of unsigned integer numbers.
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, unsigned int, defaultp> uvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint2_precision.hpp b/third_party/glm/ext/vector_uint2_precision.hpp deleted file mode 100755 index 2ba7b0d..0000000 --- a/third_party/glm/ext/vector_uint2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, highp> highp_uvec2; - - /// 2 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, mediump> mediump_uvec2; - - /// 2 components vector of low qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, lowp> lowp_uvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint3.hpp b/third_party/glm/ext/vector_uint3.hpp deleted file mode 100755 index f5b41c4..0000000 --- a/third_party/glm/ext/vector_uint3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, unsigned int, defaultp> uvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint3_precision.hpp b/third_party/glm/ext/vector_uint3_precision.hpp deleted file mode 100755 index 125191c..0000000 --- a/third_party/glm/ext/vector_uint3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, highp> highp_uvec3; - - /// 3 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, mediump> mediump_uvec3; - - /// 3 components vector of low qualifier unsigned integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, lowp> lowp_uvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint4.hpp b/third_party/glm/ext/vector_uint4.hpp deleted file mode 100755 index 32ced58..0000000 --- a/third_party/glm/ext/vector_uint4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, unsigned int, defaultp> uvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint4_precision.hpp b/third_party/glm/ext/vector_uint4_precision.hpp deleted file mode 100755 index cf4097c..0000000 --- a/third_party/glm/ext/vector_uint4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, highp> highp_uvec4; - - /// 4 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, mediump> mediump_uvec4; - - /// 4 components vector of low qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, lowp> lowp_uvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_ulp.hpp b/third_party/glm/ext/vector_ulp.hpp deleted file mode 100755 index 6210396..0000000 --- a/third_party/glm/ext/vector_ulp.hpp +++ /dev/null @@ -1,109 +0,0 @@ -/// @ref ext_vector_ulp -/// @file glm/ext/vector_ulp.hpp -/// -/// @defgroup ext_vector_ulp GLM_EXT_vector_ulp -/// @ingroup ext -/// -/// Allow the measurement of the accuracy of a function against a reference -/// implementation. This extension works on floating-point data and provide results -/// in ULP. -/// -/// Include <glm/ext/vector_ulp.hpp> to use the features of this extension. -/// -/// @see ext_scalar_ulp -/// @see ext_scalar_relational -/// @see ext_vector_relational - -#pragma once - -// Dependencies -#include "../ext/scalar_ulp.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_ulp extension included") -#endif - -namespace glm -{ - /// Return the next ULP value(s) after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x); - - /// Return the value(s) ULP distance after the input value(s).
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, int ULPs); - - /// Return the value(s) ULP distance after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs); - - /// Return the previous ULP value(s) before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, int ULPs); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, qualifier Q> - GLM_FUNC_DECL vec<L, int, Q> floatDistance(vec<L, float, Q> const& x, vec<L, float, Q> const& y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template<length_t L, qualifier Q> - GLM_FUNC_DECL vec<L, int64, Q> floatDistance(vec<L, double, Q> const& x, vec<L, double, Q> const& y); - - /// @} -}//namespace glm - -#include "vector_ulp.inl" diff --git a/third_party/glm/ext/vector_ulp.inl b/third_party/glm/ext/vector_ulp.inl deleted file mode 100755 index 91565ce..0000000 --- a/third_party/glm/ext/vector_ulp.inl +++ /dev/null @@ -1,74 +0,0 @@ -namespace glm -{ - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i]); - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, int ULPs) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i], ULPs); - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> nextFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i], ULPs[i]); - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i]); - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, int ULPs) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i], ULPs); - return Result; - } - - template<length_t L, typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, T, Q> prevFloat(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs) - { - vec<L, T, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i], ULPs[i]); - return Result; - } - - template<length_t L, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, int, Q> floatDistance(vec<L, float, Q> const& x, vec<L, float, Q> const& y) - { - vec<L, int, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = floatDistance(x[i], y[i]); - return Result; - } - - template<length_t L, qualifier Q> - GLM_FUNC_QUALIFIER vec<L, int64, Q> floatDistance(vec<L, double, Q> const& x, vec<L, double, Q> const& y) - { - vec<L, int64, Q> Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = floatDistance(x[i], y[i]); - return Result; - } -}//namespace glm diff --git a/third_party/glm/fwd.hpp b/third_party/glm/fwd.hpp deleted file mode 100755 index 474d44f..0000000 --- a/third_party/glm/fwd.hpp +++ /dev/null @@ -1,818 +0,0 @@ -#pragma once - -#include "detail/qualifier.hpp" - -namespace glm -{ -#if GLM_HAS_EXTENDED_INTEGER_TYPE - typedef std::int8_t int8; - typedef std::int16_t int16; - typedef std::int32_t int32; - typedef std::int64_t int64; - - typedef std::uint8_t uint8; - typedef std::uint16_t uint16; - typedef std::uint32_t uint32; - typedef std::uint64_t uint64; -#else - typedef signed char int8; - typedef signed short int16; - typedef signed int int32; - typedef detail::int64 int64; - - typedef unsigned char uint8; - typedef unsigned short uint16; - typedef unsigned int uint32; - typedef detail::uint64 uint64; -#endif - - // Scalar int - - typedef int8 lowp_i8; - typedef int8 mediump_i8; - typedef int8 highp_i8; - typedef int8 i8; - - typedef int8 lowp_int8; - typedef int8 mediump_int8; - typedef int8 highp_int8; - - typedef int8 lowp_int8_t; - typedef int8 mediump_int8_t; - typedef int8 highp_int8_t; - typedef int8 int8_t; - - typedef int16 lowp_i16; - typedef int16 mediump_i16; - typedef int16 highp_i16; - typedef int16 i16; - - typedef int16 lowp_int16; - typedef int16 mediump_int16; - typedef int16 highp_int16; - - typedef int16 lowp_int16_t; - typedef int16 mediump_int16_t; - typedef int16
highp_int16_t; - typedef int16 int16_t; - - typedef int32 lowp_i32; - typedef int32 mediump_i32; - typedef int32 highp_i32; - typedef int32 i32; - - typedef int32 lowp_int32; - typedef int32 mediump_int32; - typedef int32 highp_int32; - - typedef int32 lowp_int32_t; - typedef int32 mediump_int32_t; - typedef int32 highp_int32_t; - typedef int32 int32_t; - - typedef int64 lowp_i64; - typedef int64 mediump_i64; - typedef int64 highp_i64; - typedef int64 i64; - - typedef int64 lowp_int64; - typedef int64 mediump_int64; - typedef int64 highp_int64; - - typedef int64 lowp_int64_t; - typedef int64 mediump_int64_t; - typedef int64 highp_int64_t; - typedef int64 int64_t; - - // Scalar uint - - typedef uint8 lowp_u8; - typedef uint8 mediump_u8; - typedef uint8 highp_u8; - typedef uint8 u8; - - typedef uint8 lowp_uint8; - typedef uint8 mediump_uint8; - typedef uint8 highp_uint8; - - typedef uint8 lowp_uint8_t; - typedef uint8 mediump_uint8_t; - typedef uint8 highp_uint8_t; - typedef uint8 uint8_t; - - typedef uint16 lowp_u16; - typedef uint16 mediump_u16; - typedef uint16 highp_u16; - typedef uint16 u16; - - typedef uint16 lowp_uint16; - typedef uint16 mediump_uint16; - typedef uint16 highp_uint16; - - typedef uint16 lowp_uint16_t; - typedef uint16 mediump_uint16_t; - typedef uint16 highp_uint16_t; - typedef uint16 uint16_t; - - typedef uint32 lowp_u32; - typedef uint32 mediump_u32; - typedef uint32 highp_u32; - typedef uint32 u32; - - typedef uint32 lowp_uint32; - typedef uint32 mediump_uint32; - typedef uint32 highp_uint32; - - typedef uint32 lowp_uint32_t; - typedef uint32 mediump_uint32_t; - typedef uint32 highp_uint32_t; - typedef uint32 uint32_t; - - typedef uint64 lowp_u64; - typedef uint64 mediump_u64; - typedef uint64 highp_u64; - typedef uint64 u64; - - typedef uint64 lowp_uint64; - typedef uint64 mediump_uint64; - typedef uint64 highp_uint64; - - typedef uint64 lowp_uint64_t; - typedef uint64 mediump_uint64_t; - typedef uint64 highp_uint64_t; - typedef uint64 uint64_t; - - // Scalar float - - typedef float lowp_f32; - typedef float mediump_f32; - typedef float highp_f32; - typedef float f32; - - typedef float lowp_float32; - typedef float mediump_float32; - typedef float highp_float32; - typedef float float32; - - typedef float lowp_float32_t; - typedef float mediump_float32_t; - typedef float highp_float32_t; - typedef float float32_t; - - - typedef double lowp_f64; - typedef double mediump_f64; - typedef double highp_f64; - typedef double f64; - - typedef double lowp_float64; - typedef double mediump_float64; - typedef double highp_float64; - typedef double float64; - - typedef double lowp_float64_t; - typedef double mediump_float64_t; - typedef double highp_float64_t; - typedef double float64_t; - - // Vector bool - - typedef vec<1, bool, lowp> lowp_bvec1; - typedef vec<2, bool, lowp> lowp_bvec2; - typedef vec<3, bool, lowp> lowp_bvec3; - typedef vec<4, bool, lowp> lowp_bvec4; - - typedef vec<1, bool, mediump> mediump_bvec1; - typedef vec<2, bool, mediump> mediump_bvec2; - typedef vec<3, bool, mediump> mediump_bvec3; - typedef vec<4, bool, mediump> mediump_bvec4; - - typedef vec<1, bool, highp> highp_bvec1; - typedef vec<2, bool, highp> highp_bvec2; - typedef vec<3, bool, highp> highp_bvec3; - typedef vec<4, bool, highp> highp_bvec4; - - typedef vec<1, bool, defaultp> bvec1; - typedef vec<2, bool, defaultp> bvec2; - typedef vec<3, bool, defaultp> bvec3; - typedef vec<4, bool, defaultp> bvec4; - - // Vector int - - typedef vec<1, i32, lowp> lowp_ivec1; - typedef vec<2, i32, lowp> 
lowp_ivec2; - typedef vec<3, i32, lowp> lowp_ivec3; - typedef vec<4, i32, lowp> lowp_ivec4; - - typedef vec<1, i32, mediump> mediump_ivec1; - typedef vec<2, i32, mediump> mediump_ivec2; - typedef vec<3, i32, mediump> mediump_ivec3; - typedef vec<4, i32, mediump> mediump_ivec4; - - typedef vec<1, i32, highp> highp_ivec1; - typedef vec<2, i32, highp> highp_ivec2; - typedef vec<3, i32, highp> highp_ivec3; - typedef vec<4, i32, highp> highp_ivec4; - - typedef vec<1, i32, defaultp> ivec1; - typedef vec<2, i32, defaultp> ivec2; - typedef vec<3, i32, defaultp> ivec3; - typedef vec<4, i32, defaultp> ivec4; - - typedef vec<1, i8, lowp> lowp_i8vec1; - typedef vec<2, i8, lowp> lowp_i8vec2; - typedef vec<3, i8, lowp> lowp_i8vec3; - typedef vec<4, i8, lowp> lowp_i8vec4; - - typedef vec<1, i8, mediump> mediump_i8vec1; - typedef vec<2, i8, mediump> mediump_i8vec2; - typedef vec<3, i8, mediump> mediump_i8vec3; - typedef vec<4, i8, mediump> mediump_i8vec4; - - typedef vec<1, i8, highp> highp_i8vec1; - typedef vec<2, i8, highp> highp_i8vec2; - typedef vec<3, i8, highp> highp_i8vec3; - typedef vec<4, i8, highp> highp_i8vec4; - - typedef vec<1, i8, defaultp> i8vec1; - typedef vec<2, i8, defaultp> i8vec2; - typedef vec<3, i8, defaultp> i8vec3; - typedef vec<4, i8, defaultp> i8vec4; - - typedef vec<1, i16, lowp> lowp_i16vec1; - typedef vec<2, i16, lowp> lowp_i16vec2; - typedef vec<3, i16, lowp> lowp_i16vec3; - typedef vec<4, i16, lowp> lowp_i16vec4; - - typedef vec<1, i16, mediump> mediump_i16vec1; - typedef vec<2, i16, mediump> mediump_i16vec2; - typedef vec<3, i16, mediump> mediump_i16vec3; - typedef vec<4, i16, mediump> mediump_i16vec4; - - typedef vec<1, i16, highp> highp_i16vec1; - typedef vec<2, i16, highp> highp_i16vec2; - typedef vec<3, i16, highp> highp_i16vec3; - typedef vec<4, i16, highp> highp_i16vec4; - - typedef vec<1, i16, defaultp> i16vec1; - typedef vec<2, i16, defaultp> i16vec2; - typedef vec<3, i16, defaultp> i16vec3; - typedef vec<4, i16, defaultp> i16vec4; - - typedef vec<1, i32, lowp> lowp_i32vec1; - typedef vec<2, i32, lowp> lowp_i32vec2; - typedef vec<3, i32, lowp> lowp_i32vec3; - typedef vec<4, i32, lowp> lowp_i32vec4; - - typedef vec<1, i32, mediump> mediump_i32vec1; - typedef vec<2, i32, mediump> mediump_i32vec2; - typedef vec<3, i32, mediump> mediump_i32vec3; - typedef vec<4, i32, mediump> mediump_i32vec4; - - typedef vec<1, i32, highp> highp_i32vec1; - typedef vec<2, i32, highp> highp_i32vec2; - typedef vec<3, i32, highp> highp_i32vec3; - typedef vec<4, i32, highp> highp_i32vec4; - - typedef vec<1, i32, defaultp> i32vec1; - typedef vec<2, i32, defaultp> i32vec2; - typedef vec<3, i32, defaultp> i32vec3; - typedef vec<4, i32, defaultp> i32vec4; - - typedef vec<1, i64, lowp> lowp_i64vec1; - typedef vec<2, i64, lowp> lowp_i64vec2; - typedef vec<3, i64, lowp> lowp_i64vec3; - typedef vec<4, i64, lowp> lowp_i64vec4; - - typedef vec<1, i64, mediump> mediump_i64vec1; - typedef vec<2, i64, mediump> mediump_i64vec2; - typedef vec<3, i64, mediump> mediump_i64vec3; - typedef vec<4, i64, mediump> mediump_i64vec4; - - typedef vec<1, i64, highp> highp_i64vec1; - typedef vec<2, i64, highp> highp_i64vec2; - typedef vec<3, i64, highp> highp_i64vec3; - typedef vec<4, i64, highp> highp_i64vec4; - - typedef vec<1, i64, defaultp> i64vec1; - typedef vec<2, i64, defaultp> i64vec2; - typedef vec<3, i64, defaultp> i64vec3; - typedef vec<4, i64, defaultp> i64vec4; - - // Vector uint - - typedef vec<1, u32, lowp> lowp_uvec1; - typedef vec<2, u32, lowp> lowp_uvec2; - typedef vec<3, u32, lowp> lowp_uvec3; - 
typedef vec<4, u32, lowp> lowp_uvec4; - - typedef vec<1, u32, mediump> mediump_uvec1; - typedef vec<2, u32, mediump> mediump_uvec2; - typedef vec<3, u32, mediump> mediump_uvec3; - typedef vec<4, u32, mediump> mediump_uvec4; - - typedef vec<1, u32, highp> highp_uvec1; - typedef vec<2, u32, highp> highp_uvec2; - typedef vec<3, u32, highp> highp_uvec3; - typedef vec<4, u32, highp> highp_uvec4; - - typedef vec<1, u32, defaultp> uvec1; - typedef vec<2, u32, defaultp> uvec2; - typedef vec<3, u32, defaultp> uvec3; - typedef vec<4, u32, defaultp> uvec4; - - typedef vec<1, u8, lowp> lowp_u8vec1; - typedef vec<2, u8, lowp> lowp_u8vec2; - typedef vec<3, u8, lowp> lowp_u8vec3; - typedef vec<4, u8, lowp> lowp_u8vec4; - - typedef vec<1, u8, mediump> mediump_u8vec1; - typedef vec<2, u8, mediump> mediump_u8vec2; - typedef vec<3, u8, mediump> mediump_u8vec3; - typedef vec<4, u8, mediump> mediump_u8vec4; - - typedef vec<1, u8, highp> highp_u8vec1; - typedef vec<2, u8, highp> highp_u8vec2; - typedef vec<3, u8, highp> highp_u8vec3; - typedef vec<4, u8, highp> highp_u8vec4; - - typedef vec<1, u8, defaultp> u8vec1; - typedef vec<2, u8, defaultp> u8vec2; - typedef vec<3, u8, defaultp> u8vec3; - typedef vec<4, u8, defaultp> u8vec4; - - typedef vec<1, u16, lowp> lowp_u16vec1; - typedef vec<2, u16, lowp> lowp_u16vec2; - typedef vec<3, u16, lowp> lowp_u16vec3; - typedef vec<4, u16, lowp> lowp_u16vec4; - - typedef vec<1, u16, mediump> mediump_u16vec1; - typedef vec<2, u16, mediump> mediump_u16vec2; - typedef vec<3, u16, mediump> mediump_u16vec3; - typedef vec<4, u16, mediump> mediump_u16vec4; - - typedef vec<1, u16, highp> highp_u16vec1; - typedef vec<2, u16, highp> highp_u16vec2; - typedef vec<3, u16, highp> highp_u16vec3; - typedef vec<4, u16, highp> highp_u16vec4; - - typedef vec<1, u16, defaultp> u16vec1; - typedef vec<2, u16, defaultp> u16vec2; - typedef vec<3, u16, defaultp> u16vec3; - typedef vec<4, u16, defaultp> u16vec4; - - typedef vec<1, u32, lowp> lowp_u32vec1; - typedef vec<2, u32, lowp> lowp_u32vec2; - typedef vec<3, u32, lowp> lowp_u32vec3; - typedef vec<4, u32, lowp> lowp_u32vec4; - - typedef vec<1, u32, mediump> mediump_u32vec1; - typedef vec<2, u32, mediump> mediump_u32vec2; - typedef vec<3, u32, mediump> mediump_u32vec3; - typedef vec<4, u32, mediump> mediump_u32vec4; - - typedef vec<1, u32, highp> highp_u32vec1; - typedef vec<2, u32, highp> highp_u32vec2; - typedef vec<3, u32, highp> highp_u32vec3; - typedef vec<4, u32, highp> highp_u32vec4; - - typedef vec<1, u32, defaultp> u32vec1; - typedef vec<2, u32, defaultp> u32vec2; - typedef vec<3, u32, defaultp> u32vec3; - typedef vec<4, u32, defaultp> u32vec4; - - typedef vec<1, u64, lowp> lowp_u64vec1; - typedef vec<2, u64, lowp> lowp_u64vec2; - typedef vec<3, u64, lowp> lowp_u64vec3; - typedef vec<4, u64, lowp> lowp_u64vec4; - - typedef vec<1, u64, mediump> mediump_u64vec1; - typedef vec<2, u64, mediump> mediump_u64vec2; - typedef vec<3, u64, mediump> mediump_u64vec3; - typedef vec<4, u64, mediump> mediump_u64vec4; - - typedef vec<1, u64, highp> highp_u64vec1; - typedef vec<2, u64, highp> highp_u64vec2; - typedef vec<3, u64, highp> highp_u64vec3; - typedef vec<4, u64, highp> highp_u64vec4; - - typedef vec<1, u64, defaultp> u64vec1; - typedef vec<2, u64, defaultp> u64vec2; - typedef vec<3, u64, defaultp> u64vec3; - typedef vec<4, u64, defaultp> u64vec4; - - // Vector float - - typedef vec<1, float, lowp> lowp_vec1; - typedef vec<2, float, lowp> lowp_vec2; - typedef vec<3, float, lowp> lowp_vec3; - typedef vec<4, float, lowp> lowp_vec4; - - typedef 
vec<1, float, mediump> mediump_vec1; - typedef vec<2, float, mediump> mediump_vec2; - typedef vec<3, float, mediump> mediump_vec3; - typedef vec<4, float, mediump> mediump_vec4; - - typedef vec<1, float, highp> highp_vec1; - typedef vec<2, float, highp> highp_vec2; - typedef vec<3, float, highp> highp_vec3; - typedef vec<4, float, highp> highp_vec4; - - typedef vec<1, float, defaultp> vec1; - typedef vec<2, float, defaultp> vec2; - typedef vec<3, float, defaultp> vec3; - typedef vec<4, float, defaultp> vec4; - - typedef vec<1, float, lowp> lowp_fvec1; - typedef vec<2, float, lowp> lowp_fvec2; - typedef vec<3, float, lowp> lowp_fvec3; - typedef vec<4, float, lowp> lowp_fvec4; - - typedef vec<1, float, mediump> mediump_fvec1; - typedef vec<2, float, mediump> mediump_fvec2; - typedef vec<3, float, mediump> mediump_fvec3; - typedef vec<4, float, mediump> mediump_fvec4; - - typedef vec<1, float, highp> highp_fvec1; - typedef vec<2, float, highp> highp_fvec2; - typedef vec<3, float, highp> highp_fvec3; - typedef vec<4, float, highp> highp_fvec4; - - typedef vec<1, f32, defaultp> fvec1; - typedef vec<2, f32, defaultp> fvec2; - typedef vec<3, f32, defaultp> fvec3; - typedef vec<4, f32, defaultp> fvec4; - - typedef vec<1, f32, lowp> lowp_f32vec1; - typedef vec<2, f32, lowp> lowp_f32vec2; - typedef vec<3, f32, lowp> lowp_f32vec3; - typedef vec<4, f32, lowp> lowp_f32vec4; - - typedef vec<1, f32, mediump> mediump_f32vec1; - typedef vec<2, f32, mediump> mediump_f32vec2; - typedef vec<3, f32, mediump> mediump_f32vec3; - typedef vec<4, f32, mediump> mediump_f32vec4; - - typedef vec<1, f32, highp> highp_f32vec1; - typedef vec<2, f32, highp> highp_f32vec2; - typedef vec<3, f32, highp> highp_f32vec3; - typedef vec<4, f32, highp> highp_f32vec4; - - typedef vec<1, f32, defaultp> f32vec1; - typedef vec<2, f32, defaultp> f32vec2; - typedef vec<3, f32, defaultp> f32vec3; - typedef vec<4, f32, defaultp> f32vec4; - - typedef vec<1, f64, lowp> lowp_dvec1; - typedef vec<2, f64, lowp> lowp_dvec2; - typedef vec<3, f64, lowp> lowp_dvec3; - typedef vec<4, f64, lowp> lowp_dvec4; - - typedef vec<1, f64, mediump> mediump_dvec1; - typedef vec<2, f64, mediump> mediump_dvec2; - typedef vec<3, f64, mediump> mediump_dvec3; - typedef vec<4, f64, mediump> mediump_dvec4; - - typedef vec<1, f64, highp> highp_dvec1; - typedef vec<2, f64, highp> highp_dvec2; - typedef vec<3, f64, highp> highp_dvec3; - typedef vec<4, f64, highp> highp_dvec4; - - typedef vec<1, f64, defaultp> dvec1; - typedef vec<2, f64, defaultp> dvec2; - typedef vec<3, f64, defaultp> dvec3; - typedef vec<4, f64, defaultp> dvec4; - - typedef vec<1, f64, lowp> lowp_f64vec1; - typedef vec<2, f64, lowp> lowp_f64vec2; - typedef vec<3, f64, lowp> lowp_f64vec3; - typedef vec<4, f64, lowp> lowp_f64vec4; - - typedef vec<1, f64, mediump> mediump_f64vec1; - typedef vec<2, f64, mediump> mediump_f64vec2; - typedef vec<3, f64, mediump> mediump_f64vec3; - typedef vec<4, f64, mediump> mediump_f64vec4; - - typedef vec<1, f64, highp> highp_f64vec1; - typedef vec<2, f64, highp> highp_f64vec2; - typedef vec<3, f64, highp> highp_f64vec3; - typedef vec<4, f64, highp> highp_f64vec4; - - typedef vec<1, f64, defaultp> f64vec1; - typedef vec<2, f64, defaultp> f64vec2; - typedef vec<3, f64, defaultp> f64vec3; - typedef vec<4, f64, defaultp> f64vec4; - - // Matrix NxN - - typedef mat<2, 2, f32, lowp> lowp_mat2; - typedef mat<3, 3, f32, lowp> lowp_mat3; - typedef mat<4, 4, f32, lowp> lowp_mat4; - - typedef mat<2, 2, f32, mediump> mediump_mat2; - typedef mat<3, 3, f32, mediump> mediump_mat3; - 
typedef mat<4, 4, f32, mediump> mediump_mat4; - - typedef mat<2, 2, f32, highp> highp_mat2; - typedef mat<3, 3, f32, highp> highp_mat3; - typedef mat<4, 4, f32, highp> highp_mat4; - - typedef mat<2, 2, f32, defaultp> mat2; - typedef mat<3, 3, f32, defaultp> mat3; - typedef mat<4, 4, f32, defaultp> mat4; - - typedef mat<2, 2, f32, lowp> lowp_fmat2; - typedef mat<3, 3, f32, lowp> lowp_fmat3; - typedef mat<4, 4, f32, lowp> lowp_fmat4; - - typedef mat<2, 2, f32, mediump> mediump_fmat2; - typedef mat<3, 3, f32, mediump> mediump_fmat3; - typedef mat<4, 4, f32, mediump> mediump_fmat4; - - typedef mat<2, 2, f32, highp> highp_fmat2; - typedef mat<3, 3, f32, highp> highp_fmat3; - typedef mat<4, 4, f32, highp> highp_fmat4; - - typedef mat<2, 2, f32, defaultp> fmat2; - typedef mat<3, 3, f32, defaultp> fmat3; - typedef mat<4, 4, f32, defaultp> fmat4; - - typedef mat<2, 2, f32, lowp> lowp_f32mat2; - typedef mat<3, 3, f32, lowp> lowp_f32mat3; - typedef mat<4, 4, f32, lowp> lowp_f32mat4; - - typedef mat<2, 2, f32, mediump> mediump_f32mat2; - typedef mat<3, 3, f32, mediump> mediump_f32mat3; - typedef mat<4, 4, f32, mediump> mediump_f32mat4; - - typedef mat<2, 2, f32, highp> highp_f32mat2; - typedef mat<3, 3, f32, highp> highp_f32mat3; - typedef mat<4, 4, f32, highp> highp_f32mat4; - - typedef mat<2, 2, f32, defaultp> f32mat2; - typedef mat<3, 3, f32, defaultp> f32mat3; - typedef mat<4, 4, f32, defaultp> f32mat4; - - typedef mat<2, 2, f64, lowp> lowp_dmat2; - typedef mat<3, 3, f64, lowp> lowp_dmat3; - typedef mat<4, 4, f64, lowp> lowp_dmat4; - - typedef mat<2, 2, f64, mediump> mediump_dmat2; - typedef mat<3, 3, f64, mediump> mediump_dmat3; - typedef mat<4, 4, f64, mediump> mediump_dmat4; - - typedef mat<2, 2, f64, highp> highp_dmat2; - typedef mat<3, 3, f64, highp> highp_dmat3; - typedef mat<4, 4, f64, highp> highp_dmat4; - - typedef mat<2, 2, f64, defaultp> dmat2; - typedef mat<3, 3, f64, defaultp> dmat3; - typedef mat<4, 4, f64, defaultp> dmat4; - - typedef mat<2, 2, f64, lowp> lowp_f64mat2; - typedef mat<3, 3, f64, lowp> lowp_f64mat3; - typedef mat<4, 4, f64, lowp> lowp_f64mat4; - - typedef mat<2, 2, f64, mediump> mediump_f64mat2; - typedef mat<3, 3, f64, mediump> mediump_f64mat3; - typedef mat<4, 4, f64, mediump> mediump_f64mat4; - - typedef mat<2, 2, f64, highp> highp_f64mat2; - typedef mat<3, 3, f64, highp> highp_f64mat3; - typedef mat<4, 4, f64, highp> highp_f64mat4; - - typedef mat<2, 2, f64, defaultp> f64mat2; - typedef mat<3, 3, f64, defaultp> f64mat3; - typedef mat<4, 4, f64, defaultp> f64mat4; - - // Matrix MxN - - typedef mat<2, 2, f32, lowp> lowp_mat2x2; - typedef mat<2, 3, f32, lowp> lowp_mat2x3; - typedef mat<2, 4, f32, lowp> lowp_mat2x4; - typedef mat<3, 2, f32, lowp> lowp_mat3x2; - typedef mat<3, 3, f32, lowp> lowp_mat3x3; - typedef mat<3, 4, f32, lowp> lowp_mat3x4; - typedef mat<4, 2, f32, lowp> lowp_mat4x2; - typedef mat<4, 3, f32, lowp> lowp_mat4x3; - typedef mat<4, 4, f32, lowp> lowp_mat4x4; - - typedef mat<2, 2, f32, mediump> mediump_mat2x2; - typedef mat<2, 3, f32, mediump> mediump_mat2x3; - typedef mat<2, 4, f32, mediump> mediump_mat2x4; - typedef mat<3, 2, f32, mediump> mediump_mat3x2; - typedef mat<3, 3, f32, mediump> mediump_mat3x3; - typedef mat<3, 4, f32, mediump> mediump_mat3x4; - typedef mat<4, 2, f32, mediump> mediump_mat4x2; - typedef mat<4, 3, f32, mediump> mediump_mat4x3; - typedef mat<4, 4, f32, mediump> mediump_mat4x4; - - typedef mat<2, 2, f32, highp> highp_mat2x2; - typedef mat<2, 3, f32, highp> highp_mat2x3; - typedef mat<2, 4, f32, highp> highp_mat2x4; - typedef 
mat<3, 2, f32, highp> highp_mat3x2; - typedef mat<3, 3, f32, highp> highp_mat3x3; - typedef mat<3, 4, f32, highp> highp_mat3x4; - typedef mat<4, 2, f32, highp> highp_mat4x2; - typedef mat<4, 3, f32, highp> highp_mat4x3; - typedef mat<4, 4, f32, highp> highp_mat4x4; - - typedef mat<2, 2, f32, defaultp> mat2x2; - typedef mat<3, 2, f32, defaultp> mat3x2; - typedef mat<4, 2, f32, defaultp> mat4x2; - typedef mat<2, 3, f32, defaultp> mat2x3; - typedef mat<3, 3, f32, defaultp> mat3x3; - typedef mat<4, 3, f32, defaultp> mat4x3; - typedef mat<2, 4, f32, defaultp> mat2x4; - typedef mat<3, 4, f32, defaultp> mat3x4; - typedef mat<4, 4, f32, defaultp> mat4x4; - - typedef mat<2, 2, f32, lowp> lowp_fmat2x2; - typedef mat<2, 3, f32, lowp> lowp_fmat2x3; - typedef mat<2, 4, f32, lowp> lowp_fmat2x4; - typedef mat<3, 2, f32, lowp> lowp_fmat3x2; - typedef mat<3, 3, f32, lowp> lowp_fmat3x3; - typedef mat<3, 4, f32, lowp> lowp_fmat3x4; - typedef mat<4, 2, f32, lowp> lowp_fmat4x2; - typedef mat<4, 3, f32, lowp> lowp_fmat4x3; - typedef mat<4, 4, f32, lowp> lowp_fmat4x4; - - typedef mat<2, 2, f32, mediump> mediump_fmat2x2; - typedef mat<2, 3, f32, mediump> mediump_fmat2x3; - typedef mat<2, 4, f32, mediump> mediump_fmat2x4; - typedef mat<3, 2, f32, mediump> mediump_fmat3x2; - typedef mat<3, 3, f32, mediump> mediump_fmat3x3; - typedef mat<3, 4, f32, mediump> mediump_fmat3x4; - typedef mat<4, 2, f32, mediump> mediump_fmat4x2; - typedef mat<4, 3, f32, mediump> mediump_fmat4x3; - typedef mat<4, 4, f32, mediump> mediump_fmat4x4; - - typedef mat<2, 2, f32, highp> highp_fmat2x2; - typedef mat<2, 3, f32, highp> highp_fmat2x3; - typedef mat<2, 4, f32, highp> highp_fmat2x4; - typedef mat<3, 2, f32, highp> highp_fmat3x2; - typedef mat<3, 3, f32, highp> highp_fmat3x3; - typedef mat<3, 4, f32, highp> highp_fmat3x4; - typedef mat<4, 2, f32, highp> highp_fmat4x2; - typedef mat<4, 3, f32, highp> highp_fmat4x3; - typedef mat<4, 4, f32, highp> highp_fmat4x4; - - typedef mat<2, 2, f32, defaultp> fmat2x2; - typedef mat<3, 2, f32, defaultp> fmat3x2; - typedef mat<4, 2, f32, defaultp> fmat4x2; - typedef mat<2, 3, f32, defaultp> fmat2x3; - typedef mat<3, 3, f32, defaultp> fmat3x3; - typedef mat<4, 3, f32, defaultp> fmat4x3; - typedef mat<2, 4, f32, defaultp> fmat2x4; - typedef mat<3, 4, f32, defaultp> fmat3x4; - typedef mat<4, 4, f32, defaultp> fmat4x4; - - typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; - typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; - typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; - typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; - typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; - typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; - typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; - typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; - typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; - - typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; - typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; - typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; - typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; - typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; - typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; - typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; - typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; - typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; - - typedef mat<2, 2, f32, highp> highp_f32mat2x2; - typedef mat<2, 3, f32, highp> highp_f32mat2x3; - typedef mat<2, 4, f32, highp> highp_f32mat2x4; - typedef mat<3, 2, f32, highp> highp_f32mat3x2; - typedef mat<3, 3, f32, highp> highp_f32mat3x3; - typedef mat<3, 4, f32, highp> highp_f32mat3x4; - 
typedef mat<4, 2, f32, highp> highp_f32mat4x2; - typedef mat<4, 3, f32, highp> highp_f32mat4x3; - typedef mat<4, 4, f32, highp> highp_f32mat4x4; - - typedef mat<2, 2, f32, defaultp> f32mat2x2; - typedef mat<3, 2, f32, defaultp> f32mat3x2; - typedef mat<4, 2, f32, defaultp> f32mat4x2; - typedef mat<2, 3, f32, defaultp> f32mat2x3; - typedef mat<3, 3, f32, defaultp> f32mat3x3; - typedef mat<4, 3, f32, defaultp> f32mat4x3; - typedef mat<2, 4, f32, defaultp> f32mat2x4; - typedef mat<3, 4, f32, defaultp> f32mat3x4; - typedef mat<4, 4, f32, defaultp> f32mat4x4; - - typedef mat<2, 2, double, lowp> lowp_dmat2x2; - typedef mat<2, 3, double, lowp> lowp_dmat2x3; - typedef mat<2, 4, double, lowp> lowp_dmat2x4; - typedef mat<3, 2, double, lowp> lowp_dmat3x2; - typedef mat<3, 3, double, lowp> lowp_dmat3x3; - typedef mat<3, 4, double, lowp> lowp_dmat3x4; - typedef mat<4, 2, double, lowp> lowp_dmat4x2; - typedef mat<4, 3, double, lowp> lowp_dmat4x3; - typedef mat<4, 4, double, lowp> lowp_dmat4x4; - - typedef mat<2, 2, double, mediump> mediump_dmat2x2; - typedef mat<2, 3, double, mediump> mediump_dmat2x3; - typedef mat<2, 4, double, mediump> mediump_dmat2x4; - typedef mat<3, 2, double, mediump> mediump_dmat3x2; - typedef mat<3, 3, double, mediump> mediump_dmat3x3; - typedef mat<3, 4, double, mediump> mediump_dmat3x4; - typedef mat<4, 2, double, mediump> mediump_dmat4x2; - typedef mat<4, 3, double, mediump> mediump_dmat4x3; - typedef mat<4, 4, double, mediump> mediump_dmat4x4; - - typedef mat<2, 2, double, highp> highp_dmat2x2; - typedef mat<2, 3, double, highp> highp_dmat2x3; - typedef mat<2, 4, double, highp> highp_dmat2x4; - typedef mat<3, 2, double, highp> highp_dmat3x2; - typedef mat<3, 3, double, highp> highp_dmat3x3; - typedef mat<3, 4, double, highp> highp_dmat3x4; - typedef mat<4, 2, double, highp> highp_dmat4x2; - typedef mat<4, 3, double, highp> highp_dmat4x3; - typedef mat<4, 4, double, highp> highp_dmat4x4; - - typedef mat<2, 2, double, defaultp> dmat2x2; - typedef mat<3, 2, double, defaultp> dmat3x2; - typedef mat<4, 2, double, defaultp> dmat4x2; - typedef mat<2, 3, double, defaultp> dmat2x3; - typedef mat<3, 3, double, defaultp> dmat3x3; - typedef mat<4, 3, double, defaultp> dmat4x3; - typedef mat<2, 4, double, defaultp> dmat2x4; - typedef mat<3, 4, double, defaultp> dmat3x4; - typedef mat<4, 4, double, defaultp> dmat4x4; - - typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; - typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; - typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; - typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; - typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; - typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; - typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; - typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; - typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; - - typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; - typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; - typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; - typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; - typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; - typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; - typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; - typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; - typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; - - typedef mat<2, 2, f64, highp> highp_f64mat2x2; - typedef mat<2, 3, f64, highp> highp_f64mat2x3; - typedef mat<2, 4, f64, highp> highp_f64mat2x4; - typedef mat<3, 2, f64, highp> highp_f64mat3x2; - typedef mat<3, 3, f64, highp> highp_f64mat3x3; - typedef mat<3, 4, f64, highp> 
highp_f64mat3x4; - typedef mat<4, 2, f64, highp> highp_f64mat4x2; - typedef mat<4, 3, f64, highp> highp_f64mat4x3; - typedef mat<4, 4, f64, highp> highp_f64mat4x4; - - typedef mat<2, 2, f64, defaultp> f64mat2x2; - typedef mat<3, 2, f64, defaultp> f64mat3x2; - typedef mat<4, 2, f64, defaultp> f64mat4x2; - typedef mat<2, 3, f64, defaultp> f64mat2x3; - typedef mat<3, 3, f64, defaultp> f64mat3x3; - typedef mat<4, 3, f64, defaultp> f64mat4x3; - typedef mat<2, 4, f64, defaultp> f64mat2x4; - typedef mat<3, 4, f64, defaultp> f64mat3x4; - typedef mat<4, 4, f64, defaultp> f64mat4x4; - - // Quaternion - - typedef qua<float, lowp> lowp_quat; - typedef qua<float, mediump> mediump_quat; - typedef qua<float, highp> highp_quat; - typedef qua<float, defaultp> quat; - - typedef qua<float, lowp> lowp_fquat; - typedef qua<float, mediump> mediump_fquat; - typedef qua<float, highp> highp_fquat; - typedef qua<float, defaultp> fquat; - - typedef qua<f32, lowp> lowp_f32quat; - typedef qua<f32, mediump> mediump_f32quat; - typedef qua<f32, highp> highp_f32quat; - typedef qua<f32, defaultp> f32quat; - - typedef qua<double, lowp> lowp_dquat; - typedef qua<double, mediump> mediump_dquat; - typedef qua<double, highp> highp_dquat; - typedef qua<double, defaultp> dquat; - - typedef qua<f64, lowp> lowp_f64quat; - typedef qua<f64, mediump> mediump_f64quat; - typedef qua<f64, highp> highp_f64quat; - typedef qua<f64, defaultp> f64quat; -}//namespace glm - - diff --git a/third_party/glm/geometric.hpp b/third_party/glm/geometric.hpp deleted file mode 100755 index c068a3c..0000000 --- a/third_party/glm/geometric.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/// @ref core -/// @file glm/geometric.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions -/// -/// @defgroup core_func_geometric Geometric functions -/// @ingroup core -/// -/// These operate on vectors as vectors, not component-wise. -/// -/// Include <glm/geometric.hpp> to use these core features. - -#pragma once - -#include "detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_func_geometric - /// @{ - - /// Returns the length of x, i.e., sqrt(x * x). - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL length man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL T length(vec<L, T, Q> const& x); - - /// Returns the distance between p0 and p1, i.e., length(p0 - p1). - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL distance man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1); - - /// Returns the dot product of x and y, i.e., result = x * y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL dot man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y); - - /// Returns the cross product of x and y. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL cross man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<typename T, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - /// Returns a vector in the same direction as x but with length of 1. - /// According to issue 10 GLSL 1.10 specification, if length(x) == 0 then result is undefined and generate an error. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types.
- /// - /// @see GLSL normalize man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x); - - /// If dot(Nref, I) < 0.0, return N, otherwise, return -N. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL faceforward man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> faceforward( - vec<L, T, Q> const& N, - vec<L, T, Q> const& I, - vec<L, T, Q> const& Nref); - - /// For the incident vector I and surface orientation N, - /// returns the reflection direction : result = I - 2.0 * dot(N, I) * N. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL reflect man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> reflect( - vec<L, T, Q> const& I, - vec<L, T, Q> const& N); - - /// For the incident vector I and surface normal N, - /// and the ratio of indices of refraction eta, - /// return the refraction vector. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL refract man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> refract( - vec<L, T, Q> const& I, - vec<L, T, Q> const& N, - T eta); - - /// @} -}//namespace glm - -#include "detail/func_geometric.inl" diff --git a/third_party/glm/glm.hpp b/third_party/glm/glm.hpp deleted file mode 100755 index 8b61064..0000000 --- a/third_party/glm/glm.hpp +++ /dev/null @@ -1,136 +0,0 @@ -/// @ref core -/// @file glm/glm.hpp -/// -/// @defgroup core Core features -/// -/// @brief Features that implement in C++ the GLSL specification as closely as possible. -/// -/// The GLM core consists of C++ types that mirror GLSL types and -/// C++ functions that mirror the GLSL functions. -/// -/// The best documentation for GLM Core is the current GLSL specification, -/// version 4.2 -/// (pdf file). -/// -/// GLM core functionalities require <glm/glm.hpp> to be included to be used. -/// -/// -/// @defgroup core_vector Vector types -/// -/// Vector types of two to four components with an exhaustive set of operators. -/// -/// @ingroup core -/// -/// -/// @defgroup core_vector_precision Vector types with precision qualifiers -/// -/// @brief Vector types with precision qualifiers which may result in various precision in term of ULPs -/// -/// GLSL allows defining qualifiers for particular variables. -/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, -/// with OpenGL ES's GLSL, these qualifiers do have an effect. -/// -/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: -/// a number of typedefs that use a particular qualifier. -/// -/// None of these types make any guarantees about the actual qualifier used. -/// -/// @ingroup core -/// -/// -/// @defgroup core_matrix Matrix types -/// -/// Matrix types with C columns and R rows where C and R are values between 2 to 4 included. -/// These types have exhaustive sets of operators.
-/// -/// @ingroup core -/// -/// -/// @defgroup core_matrix_precision Matrix types with precision qualifiers -/// -/// @brief Matrix types with precision qualifiers which may result in various precision in term of ULPs -/// -/// GLSL allows defining qualifiers for particular variables. -/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, -/// with OpenGL ES's GLSL, these qualifiers do have an effect. -/// -/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: -/// a number of typedefs that use a particular qualifier. -/// -/// None of these types make any guarantees about the actual qualifier used. -/// -/// @ingroup core -/// -/// -/// @defgroup ext Stable extensions -/// -/// @brief Additional features not specified by GLSL specification. -/// -/// EXT extensions are fully tested and documented. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions at once by -/// including . Otherwise, each extension needs to be included a specific file. -/// -/// -/// @defgroup gtc Recommended extensions -/// -/// @brief Additional features not specified by GLSL specification. -/// -/// GTC extensions aim to be stable with tests and documentation. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions at once by -/// including . Otherwise, each extension needs to be included a specific file. -/// -/// -/// @defgroup gtx Experimental extensions -/// -/// @brief Experimental features not specified by GLSL specification. -/// -/// Experimental extensions are useful functions and types, but the development of -/// their API and functionality is not necessarily stable. They can change -/// substantially between versions. Backwards compatibility is not much of an issue -/// for them. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions -/// at once by including . Otherwise, each extension needs to be -/// included a specific file. -/// -/// @mainpage OpenGL Mathematics (GLM) -/// - Website: glm.g-truc.net -/// - GLM API documentation -/// - GLM Manual - -#include "detail/_fixes.hpp" - -#include "detail/setup.hpp" - -#pragma once - -#include -#include -#include -#include -#include -#include "fwd.hpp" - -#include "vec2.hpp" -#include "vec3.hpp" -#include "vec4.hpp" -#include "mat2x2.hpp" -#include "mat2x3.hpp" -#include "mat2x4.hpp" -#include "mat3x2.hpp" -#include "mat3x3.hpp" -#include "mat3x4.hpp" -#include "mat4x2.hpp" -#include "mat4x3.hpp" -#include "mat4x4.hpp" - -#include "trigonometric.hpp" -#include "exponential.hpp" -#include "common.hpp" -#include "packing.hpp" -#include "geometric.hpp" -#include "matrix.hpp" -#include "vector_relational.hpp" -#include "integer.hpp" diff --git a/third_party/glm/gtc/bitfield.hpp b/third_party/glm/gtc/bitfield.hpp deleted file mode 100755 index 084fbe7..0000000 --- a/third_party/glm/gtc/bitfield.hpp +++ /dev/null @@ -1,266 +0,0 @@ -/// @ref gtc_bitfield -/// @file glm/gtc/bitfield.hpp -/// -/// @see core (dependence) -/// @see gtc_bitfield (dependence) -/// -/// @defgroup gtc_bitfield GLM_GTC_bitfield -/// @ingroup gtc -/// -/// Include to use the features of this extension. 
-/// -/// Allow to perform bit operations on integer values - -#include "../detail/setup.hpp" - -#pragma once - -// Dependencies -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "type_precision.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_bitfield extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_bitfield - /// @{ - - /// Build a mask of 'count' bits - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType mask(genIUType Bits); - - /// Build a mask of 'count' bits - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec mask(vec const& v); - - /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift); - - /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldRotateRight(vec const& In, int Shift); - - /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift); - - /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldRotateLeft(vec const& In, int Shift); - - /// Set to 1 a range of bits. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount); - - /// Set to 1 a range of bits. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldFillOne(vec const& Value, int FirstBit, int BitCount); - - /// Set to 0 a range of bits. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount); - - /// Set to 0 a range of bits. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. 
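The mask/rotate/fill helpers declared above are small bit-manipulation utilities. A hedged sketch of how they behave, assuming the pre-deletion GLM headers; note that with this implementation the rotate shift amount has to stay strictly between 0 and the bit width to avoid an out-of-range shift:

#include <glm/gtc/bitfield.hpp>
#include <cassert>

int main()
{
    // mask(n) builds a value with the n lowest bits set.
    assert(glm::mask(8) == 0xFF);

    // Fill or clear the bit range [FirstBit, FirstBit + BitCount).
    assert(glm::bitfieldFillOne(0u, 4, 3) == 0x70u);
    assert(glm::bitfieldFillZero(0xFFu, 0, 4) == 0xF0u);

    // Rotations preserve all bits, unlike plain shifts.
    glm::uint32 x = 0x000000FFu;
    glm::uint32 r = glm::bitfieldRotateRight(x, 8);
    assert((r & 0xFFu) == 0u); // the low byte moved away
    return 0;
}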
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave(glm::uint16 x); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave(glm::uint32 x); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. 
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. 
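The interleave functions declared above implement the classic Morton (Z-order) encoding: bit i of x lands at output bit 2*i and bit i of y at bit 2*i + 1. A small sketch under the same pre-deletion-headers assumption:

#include <glm/gtc/bitfield.hpp>
#include <cassert>

int main()
{
    // Interleave two 16-bit coordinates into one 32-bit Morton code.
    glm::uint16 x = 3; // bits 0 and 1 set
    glm::uint16 y = 1; // bit 0 set
    glm::uint32 code = glm::bitfieldInterleave(x, y);
    assert(code == 7u); // x0=1, y0=1, x1=1, y1=0 -> binary 0111

    // bitfieldDeinterleave inverts the two-way interleave.
    glm::u16vec2 back = glm::bitfieldDeinterleave(code);
    assert(back.x == x && back.y == y);
    return 0;
}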
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w); - - /// @} -} //namespace glm - -#include "bitfield.inl" diff --git a/third_party/glm/gtc/bitfield.inl b/third_party/glm/gtc/bitfield.inl deleted file mode 100755 index 06cf188..0000000 --- a/third_party/glm/gtc/bitfield.inl +++ /dev/null @@ -1,626 +0,0 @@ -/// @ref gtc_bitfield - -#include "../simd/integer.h" - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y); - - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z); - - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w); - - template<> - GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y) - { - glm::uint16 REG1(x); - glm::uint16 REG2(y); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555); - - return REG1 | static_cast(REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F0F0F); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x33333333); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x33333333); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x55555555); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x55555555); - - return REG1 | (REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x0000FFFF0000FFFFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x0000FFFF0000FFFFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF00FF00FFull); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF00FF00FFull); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F0F0F0F0F0F0Full); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333333333333333ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333333333333333ull); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555555555555555ull); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555555555555555ull); - - return REG1 | (REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - glm::uint32 REG3(z); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0xFF0000FFu); - REG2 = ((REG2 << 16) | REG2) & static_cast(0xFF0000FFu); - REG3 = ((REG3 << 16) | REG3) & static_cast(0xFF0000FFu); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x0F00F00Fu); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x0F00F00Fu); - REG3 = ((REG3 << 8) | REG3) & static_cast(0x0F00F00Fu); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0xC30C30C3u); - REG2 = ((REG2 << 4) | REG2) & static_cast(0xC30C30C3u); - REG3 = ((REG3 << 4) | REG3) & static_cast(0xC30C30C3u); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x49249249u); - REG2 = ((REG2 << 2) | REG2) & 
static_cast(0x49249249u); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x49249249u); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - - REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); - REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); - REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); - REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); - REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); - REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x30C30C30C30C30C3ull); - REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - - REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); - REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); - REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); - REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); - REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); - REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x30C30C30C30C30C3ull); - REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - glm::uint32 REG3(z); - glm::uint32 REG4(w); - - REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000Fu); - REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000Fu); - REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000Fu); - REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000Fu); - - REG1 = ((REG1 << 6) | REG1) & static_cast(0x03030303u); - REG2 = ((REG2 << 6) | REG2) & static_cast(0x03030303u); - REG3 = ((REG3 << 6) | REG3) & static_cast(0x03030303u); - REG4 = ((REG4 << 6) | REG4) & static_cast(0x03030303u); - - REG1 = ((REG1 << 3) | REG1) & static_cast(0x11111111u); - REG2 = ((REG2 << 3) | REG2) & static_cast(0x11111111u); - REG3 = ((REG3 << 3) | REG3) & static_cast(0x11111111u); - REG4 = 
((REG4 << 3) | REG4) & static_cast(0x11111111u); - - return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - glm::uint64 REG4(w); - - REG1 = ((REG1 << 24) | REG1) & static_cast(0x000000FF000000FFull); - REG2 = ((REG2 << 24) | REG2) & static_cast(0x000000FF000000FFull); - REG3 = ((REG3 << 24) | REG3) & static_cast(0x000000FF000000FFull); - REG4 = ((REG4 << 24) | REG4) & static_cast(0x000000FF000000FFull); - - REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000F000F000Full); - REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000F000F000Full); - REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000F000F000Full); - REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000F000F000Full); - - REG1 = ((REG1 << 6) | REG1) & static_cast(0x0303030303030303ull); - REG2 = ((REG2 << 6) | REG2) & static_cast(0x0303030303030303ull); - REG3 = ((REG3 << 6) | REG3) & static_cast(0x0303030303030303ull); - REG4 = ((REG4 << 6) | REG4) & static_cast(0x0303030303030303ull); - - REG1 = ((REG1 << 3) | REG1) & static_cast(0x1111111111111111ull); - REG2 = ((REG2 << 3) | REG2) & static_cast(0x1111111111111111ull); - REG3 = ((REG3 << 3) | REG3) & static_cast(0x1111111111111111ull); - REG4 = ((REG4 << 3) | REG4) & static_cast(0x1111111111111111ull); - - return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); - - return Bits >= sizeof(genIUType) * 8 ? ~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); - } - - template - GLM_FUNC_QUALIFIER vec mask(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); - - return detail::functor1::call(mask, v); - } - - template - GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); - - int const BitSize = static_cast(sizeof(genIType) * 8); - return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldRotateRight(vec const& In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); - - int const BitSize = static_cast(sizeof(T) * 8); - return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); - - int const BitSize = static_cast(sizeof(genIType) * 8); - return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldRotateLeft(vec const& In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); - - int const BitSize = static_cast(sizeof(T) * 8); - return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount) - { - return Value | static_cast(mask(BitCount) << FirstBit); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldFillOne(vec const& 
Value, int FirstBit, int BitCount) - { - return Value | static_cast(mask(BitCount) << FirstBit); - } - - template - GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount) - { - return Value & static_cast(~(mask(BitCount) << FirstBit)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount) - { - return Value & static_cast(~(mask(BitCount) << FirstBit)); - } - - GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y; - - union sign16 - { - int16 i; - uint16 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(u8vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER u8vec2 bitfieldDeinterleave(glm::uint16 x) - { - uint16 REG1(x); - uint16 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x5555); - REG2 = REG2 & static_cast(0x5555); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0xFFFF); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0xFFFF); - - return glm::u8vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(u16vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave(glm::uint32 x) - { - glm::uint32 REG1(x); - glm::uint32 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x55555555); - REG2 = REG2 & static_cast(0x55555555); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x33333333); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x33333333); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF); - - return glm::u16vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y) - { - union sign32 - { - int32 i; - uint32 u; - } sign_x, sign_y; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(u32vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER glm::u32vec2 bitfieldDeinterleave(glm::uint64 x) - { - glm::uint64 REG1(x); - 
glm::uint64 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x5555555555555555ull); - REG2 = REG2 & static_cast(0x5555555555555555ull); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333333333333333ull); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333333333333333ull); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F0F0F0F0Full); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF00FF00FFull); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF00FF00FFull); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF0000FFFFull); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF0000FFFFull); - - REG1 = ((REG1 >> 16) | REG1) & static_cast(0x00000000FFFFFFFFull); - REG2 = ((REG2 >> 16) | REG2) & static_cast(0x00000000FFFFFFFFull); - - return glm::u32vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y, sign_z; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y, sign_z; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z) - { - union sign16 - { - int32 i; - uint32 u; - } sign_x, sign_y, sign_z; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u32vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y, sign_z, sign_w; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - sign_w.i = w; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w) - { - return detail::bitfieldInterleave(x, y, z, w); - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec4 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y, sign_z, sign_w; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = 
x; - sign_y.i = y; - sign_z.i = z; - sign_w.i = w; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w) - { - return detail::bitfieldInterleave(x, y, z, w); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec4 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); - } -}//namespace glm diff --git a/third_party/glm/gtc/color_space.hpp b/third_party/glm/gtc/color_space.hpp deleted file mode 100755 index cffd9f0..0000000 --- a/third_party/glm/gtc/color_space.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/// @ref gtc_color_space -/// @file glm/gtc/color_space.hpp -/// -/// @see core (dependence) -/// @see gtc_color_space (dependence) -/// -/// @defgroup gtc_color_space GLM_GTC_color_space -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../exponential.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_color_space extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_color_space - /// @{ - - /// Convert a linear color to sRGB color using a standard gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear); - - /// Convert a linear color to sRGB color using a custom gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear, T Gamma); - - /// Convert a sRGB color to linear color using a standard gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB); - - /// Convert a sRGB color to linear color using a custom gamma correction. - // IEC 61966-2-1:1999 / Rec. 
709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma); - - /// @} -} //namespace glm - -#include "color_space.inl" diff --git a/third_party/glm/gtc/color_space.inl b/third_party/glm/gtc/color_space.inl deleted file mode 100755 index 2a90004..0000000 --- a/third_party/glm/gtc/color_space.inl +++ /dev/null @@ -1,84 +0,0 @@ -/// @ref gtc_color_space - -namespace glm{ -namespace detail -{ - template - struct compute_rgbToSrgb - { - GLM_FUNC_QUALIFIER static vec call(vec const& ColorRGB, T GammaCorrection) - { - vec const ClampedColor(clamp(ColorRGB, static_cast(0), static_cast(1))); - - return mix( - pow(ClampedColor, vec(GammaCorrection)) * static_cast(1.055) - static_cast(0.055), - ClampedColor * static_cast(12.92), - lessThan(ClampedColor, vec(static_cast(0.0031308)))); - } - }; - - template - struct compute_rgbToSrgb<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorRGB, T GammaCorrection) - { - return vec<4, T, Q>(compute_rgbToSrgb<3, T, Q>::call(vec<3, T, Q>(ColorRGB), GammaCorrection), ColorRGB.w); - } - }; - - template - struct compute_srgbToRgb - { - GLM_FUNC_QUALIFIER static vec call(vec const& ColorSRGB, T Gamma) - { - return mix( - pow((ColorSRGB + static_cast(0.055)) * static_cast(0.94786729857819905213270142180095), vec(Gamma)), - ColorSRGB * static_cast(0.07739938080495356037151702786378), - lessThanEqual(ColorSRGB, vec(static_cast(0.04045)))); - } - }; - - template - struct compute_srgbToRgb<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorSRGB, T Gamma) - { - return vec<4, T, Q>(compute_srgbToRgb<3, T, Q>::call(vec<3, T, Q>(ColorSRGB), Gamma), ColorSRGB.w); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear) - { - return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(0.41666)); - } - - // Based on Ian Taylor http://chilliant.blogspot.fr/2012/08/srgb-approximations-for-hlsl.html - template<> - GLM_FUNC_QUALIFIER vec<3, float, lowp> convertLinearToSRGB(vec<3, float, lowp> const& ColorLinear) - { - vec<3, float, lowp> S1 = sqrt(ColorLinear); - vec<3, float, lowp> S2 = sqrt(S1); - vec<3, float, lowp> S3 = sqrt(S2); - return 0.662002687f * S1 + 0.684122060f * S2 - 0.323583601f * S3 - 0.0225411470f * ColorLinear; - } - - template - GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear, T Gamma) - { - return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(1) / Gamma); - } - - template - GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB) - { - return detail::compute_srgbToRgb::call(ColorSRGB, static_cast(2.4)); - } - - template - GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma) - { - return detail::compute_srgbToRgb::call(ColorSRGB, Gamma); - } -}//namespace glm diff --git a/third_party/glm/gtc/constants.hpp b/third_party/glm/gtc/constants.hpp deleted file mode 100755 index 99f2128..0000000 --- a/third_party/glm/gtc/constants.hpp +++ /dev/null @@ -1,165 +0,0 @@ -/// @ref gtc_constants -/// @file glm/gtc/constants.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_constants GLM_GTC_constants -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Provide a list of constants and precomputed useful values. 
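The GTC_color_space conversions deleted just above are plain gamma encode/decode helpers. A minimal round-trip sketch (pre-deletion headers assumed; the tolerance reflects that the default encode exponent 0.41666 is only an approximation of the exact sRGB curve):

#include <glm/vec3.hpp>
#include <glm/gtc/color_space.hpp>
#include <cmath>
#include <cassert>

int main()
{
    // Encode a linear color for display, then decode it back.
    glm::vec3 linear(0.2f, 0.5f, 0.8f);
    glm::vec3 srgb  = glm::convertLinearToSRGB(linear); // ~1/2.4 gamma encode
    glm::vec3 round = glm::convertSRGBToLinear(srgb);   // 2.4 gamma decode

    // The round trip is close but not bit-exact.
    assert(std::fabs(round.r - linear.r) < 1e-3f);
    return 0;
}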
- -#pragma once - -// Dependencies -#include "../ext/scalar_constants.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_constants extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_constants - /// @{ - - /// Return 0. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType zero(); - - /// Return 1. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one(); - - /// Return pi * 2. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi(); - - /// Return square root of pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi(); - - /// Return pi / 2. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi(); - - /// Return pi / 2 * 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi(); - - /// Return pi / 4. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi(); - - /// Return 1 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi(); - - /// Return 1 / (pi * 2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi(); - - /// Return 2 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi(); - - /// Return 4 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi(); - - /// Return 2 / sqrt(pi). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi(); - - /// Return 1 / sqrt(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two(); - - /// Return sqrt(pi / 2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi(); - - /// Return sqrt(2 * pi). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi(); - - /// Return sqrt(ln(4)). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four(); - - /// Return e constant. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType e(); - - /// Return Euler's constant. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType euler(); - - /// Return sqrt(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_two(); - - /// Return sqrt(3). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_three(); - - /// Return sqrt(5). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_five(); - - /// Return ln(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two(); - - /// Return ln(10). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten(); - - /// Return ln(ln(2)). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two(); - - /// Return 1 / 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType third(); - - /// Return 2 / 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds(); - - /// Return the golden ratio constant. 
- /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio(); - - /// @} -} //namespace glm - -#include "constants.inl" diff --git a/third_party/glm/gtc/constants.inl b/third_party/glm/gtc/constants.inl deleted file mode 100755 index bb98c6b..0000000 --- a/third_party/glm/gtc/constants.inl +++ /dev/null @@ -1,167 +0,0 @@ -/// @ref gtc_constants - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType zero() - { - return genType(0); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one() - { - return genType(1); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_pi() - { - return genType(6.28318530717958647692528676655900576); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_pi() - { - return genType(1.772453850905516027); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType half_pi() - { - return genType(1.57079632679489661923132169163975144); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType three_over_two_pi() - { - return genType(4.71238898038468985769396507491925432); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType quarter_pi() - { - return genType(0.785398163397448309615660845819875721); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_pi() - { - return genType(0.318309886183790671537767526745028724); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_two_pi() - { - return genType(0.159154943091895335768883763372514362); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_pi() - { - return genType(0.636619772367581343075535053490057448); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType four_over_pi() - { - return genType(1.273239544735162686151070106980114898); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_root_pi() - { - return genType(1.12837916709551257389615890312154517); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_root_two() - { - return genType(0.707106781186547524400844362104849039); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_half_pi() - { - return genType(1.253314137315500251); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two_pi() - { - return genType(2.506628274631000502); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_ln_four() - { - return genType(1.17741002251547469); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType e() - { - return genType(2.71828182845904523536); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType euler() - { - return genType(0.577215664901532860606); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two() - { - return genType(1.41421356237309504880168872420969808); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_three() - { - return genType(1.73205080756887729352744634150587236); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_five() - { - return genType(2.23606797749978969640917366873127623); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_two() - { - return genType(0.693147180559945309417232121458176568); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ten() - { - return genType(2.30258509299404568401799145468436421); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ln_two() - { - return genType(-0.3665129205816643); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType third() - { - return genType(0.3333333333333333333333333333333333333333); - } - - 
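Each GTC_constants entry declared above is a function template rather than a variable, so the caller picks the precision at the call site. A quick hedged sketch (hypothetical main, pre-deletion headers):

#include <glm/gtc/constants.hpp>
#include <cstdio>

int main()
{
    // The template argument selects the scalar type of the constant.
    float tau  = glm::two_pi<float>();
    double phi = glm::golden_ratio<double>();

    std::printf("tau=%f phi=%f half_pi=%f\n",
                tau, phi, glm::half_pi<float>());
    return 0;
}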
template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_thirds()
-	{
-		return genType(0.666666666666666666666666666666666666667);
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType golden_ratio()
-	{
-		return genType(1.61803398874989484820458683436563811);
-	}
-
-}//namespace glm
diff --git a/third_party/glm/gtc/epsilon.hpp b/third_party/glm/gtc/epsilon.hpp
deleted file mode 100755
index 640439b..0000000
--- a/third_party/glm/gtc/epsilon.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/// @ref gtc_epsilon
-/// @file glm/gtc/epsilon.hpp
-///
-/// @see core (dependence)
-/// @see gtc_quaternion (dependence)
-///
-/// @defgroup gtc_epsilon GLM_GTC_epsilon
-/// @ingroup gtc
-///
-/// Include <glm/gtc/epsilon.hpp> to use the features of this extension.
-///
-/// Comparison functions for user defined epsilon values.
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_epsilon extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtc_epsilon
-	/// @{
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is satisfied.
-	///
-	/// @see gtc_epsilon
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is satisfied.
-	///
-	/// @see gtc_epsilon
-	template<typename genType>
-	GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon);
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is not satisfied.
-	///
-	/// @see gtc_epsilon
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
-
-	/// Returns the comparison of |x - y| >= epsilon.
-	/// True if this expression is satisfied.
-	///
-	/// @see gtc_epsilon
-	template<typename genType>
-	GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon);
-
-	/// @}
-}//namespace glm
-
-#include "epsilon.inl"
diff --git a/third_party/glm/gtc/epsilon.inl b/third_party/glm/gtc/epsilon.inl
deleted file mode 100755
index 508b9f8..0000000
--- a/third_party/glm/gtc/epsilon.inl
+++ /dev/null
@@ -1,80 +0,0 @@
-/// @ref gtc_epsilon
-
-// Dependency:
-#include "../vector_relational.hpp"
-#include "../common.hpp"
-
-namespace glm
-{
-	template<>
-	GLM_FUNC_QUALIFIER bool epsilonEqual
-	(
-		float const& x,
-		float const& y,
-		float const& epsilon
-	)
-	{
-		return abs(x - y) < epsilon;
-	}
-
-	template<>
-	GLM_FUNC_QUALIFIER bool epsilonEqual
-	(
-		double const& x,
-		double const& y,
-		double const& epsilon
-	)
-	{
-		return abs(x - y) < epsilon;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
-	{
-		return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
-	{
-		return lessThan(abs(x - y), vec<L, T, Q>(epsilon));
-	}
-
-	template<>
-	GLM_FUNC_QUALIFIER bool epsilonNotEqual(float const& x, float const& y, float const& epsilon)
-	{
-		return abs(x - y) >= epsilon;
-	}
-
-	template<>
-	GLM_FUNC_QUALIFIER bool epsilonNotEqual(double const& x, double const& y, double const& epsilon)
-	{
-		return abs(x - y) >= epsilon;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon)
-	{
-		return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, T, Q> const& epsilon)
-	{
-		return greaterThanEqual(abs(x - y), vec<L, T, Q>(epsilon));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
-	{
-		vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
-		return lessThan(abs(v), vec<4, T, Q>(epsilon));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonNotEqual(qua<T, Q> const& x, qua<T, Q> const& y, T const& epsilon)
-	{
-		vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w);
-		return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon));
-	}
-}//namespace glm
diff --git a/third_party/glm/gtc/integer.hpp b/third_party/glm/gtc/integer.hpp
deleted file mode 100755
index 64ce10b..0000000
--- a/third_party/glm/gtc/integer.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/// @ref gtc_integer
-/// @file glm/gtc/integer.hpp
-///
-/// @see core (dependence)
-/// @see gtc_integer (dependence)
-///
-/// @defgroup gtc_integer GLM_GTC_integer
-/// @ingroup gtc
-///
-/// Include <glm/gtc/integer.hpp> to use the features of this extension.
-///
-/// @brief Allow to perform operations on integer values
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-#include "../common.hpp"
-#include "../integer.hpp"
-#include "../exponential.hpp"
-#include <limits>
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_integer extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtc_integer
-	/// @{
-
-	/// Returns the log2 of x for integer values. Useful to compute mipmap counts from a texture size.
-	/// @see gtc_integer
-	template<typename genIUType>
-	GLM_FUNC_DECL genIUType log2(genIUType x);
-
-	/// Returns a value equal to the nearest integer to x.
-	/// The fraction 0.5 will round in a direction chosen by the
-	/// implementation, presumably the direction that is fastest.
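The epsilon comparisons deleted above are the standard way to compare floating-point vectors with a tolerance. A hedged usage sketch (pre-deletion headers, hypothetical main):

#include <glm/vec3.hpp>
#include <glm/vector_relational.hpp>
#include <glm/gtc/epsilon.hpp>
#include <cassert>

int main()
{
    glm::vec3 a(0.1f + 0.2f, 1.0f, 2.0f);
    glm::vec3 b(0.3f, 1.0f, 2.0f);

    // Component-wise comparison with an explicit tolerance; returns a bvec3.
    glm::bvec3 eq = glm::epsilonEqual(a, b, 1e-6f);
    assert(glm::all(eq));

    // Scalar overload.
    assert(glm::epsilonNotEqual(1.0f, 1.1f, 1e-3f));
    return 0;
}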
- /// - /// @param x The values of the argument must be greater or equal to zero. - /// @tparam T floating point scalar types. - /// - /// @see GLSL round man page - /// @see gtc_integer - template - GLM_FUNC_DECL vec iround(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// The fraction 0.5 will round in a direction chosen by the - /// implementation, presumably the direction that is fastest. - /// - /// @param x The values of the argument must be greater or equal to zero. - /// @tparam T floating point scalar types. - /// - /// @see GLSL round man page - /// @see gtc_integer - template - GLM_FUNC_DECL vec uround(vec const& x); - - /// @} -} //namespace glm - -#include "integer.inl" diff --git a/third_party/glm/gtc/integer.inl b/third_party/glm/gtc/integer.inl deleted file mode 100755 index f0a8b4f..0000000 --- a/third_party/glm/gtc/integer.inl +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtc_integer - -namespace glm{ -namespace detail -{ - template - struct compute_log2 - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - //Equivalent to return findMSB(vec); but save one function call in ASM with VC - //return findMSB(vec); - return vec(detail::compute_findMSB_vec::call(v)); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - struct compute_log2<4, int, Q, false, Aligned> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) - { - vec<4, int, Q> Result; - _BitScanReverse(reinterpret_cast(&Result.x), v.x); - _BitScanReverse(reinterpret_cast(&Result.y), v.y); - _BitScanReverse(reinterpret_cast(&Result.z), v.z); - _BitScanReverse(reinterpret_cast(&Result.w), v.w); - return Result; - } - }; -# endif//GLM_HAS_BITSCAN_WINDOWS -}//namespace detail - template - GLM_FUNC_QUALIFIER int iround(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'iround' only accept floating-point inputs"); - assert(static_cast(0.0) <= x); - - return static_cast(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER vec iround(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'iround' only accept floating-point inputs"); - assert(all(lessThanEqual(vec(0), x))); - - return vec(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER uint uround(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'uround' only accept floating-point inputs"); - assert(static_cast(0.0) <= x); - - return static_cast(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER vec uround(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'uround' only accept floating-point inputs"); - assert(all(lessThanEqual(vec(0), x))); - - return vec(x + static_cast(0.5)); - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_access.hpp b/third_party/glm/gtc/matrix_access.hpp deleted file mode 100755 index 4935ba7..0000000 --- a/third_party/glm/gtc/matrix_access.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtc_matrix_access -/// @file glm/gtc/matrix_access.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_access GLM_GTC_matrix_access -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines functions to access rows or columns of a matrix easily. - -#pragma once - -// Dependency: -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_access extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_matrix_access - /// @{ - - /// Get a specific row of a matrix. 
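The rounding helpers above trade strictness for speed: they simply add 0.5 and truncate, which is why the inputs must be non-negative (the implementations assert on it). A sketch using the declarations from this header, under the same pre-deletion assumption:

#include <glm/vec3.hpp>
#include <glm/gtc/integer.hpp>
#include <cassert>

int main()
{
    glm::vec3 v(1.2f, 3.5f, 7.9f);
    glm::ivec3 i = glm::iround(v); // adds 0.5 then truncates
    assert(i == glm::ivec3(1, 4, 8));

    // Integer log2 gives the floor of log2, handy for mipmap counts.
    assert(glm::log2(1024) == 10);
    return 0;
}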
- /// @see gtc_matrix_access - template - GLM_FUNC_DECL typename genType::row_type row( - genType const& m, - length_t index); - - /// Set a specific row to a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL genType row( - genType const& m, - length_t index, - typename genType::row_type const& x); - - /// Get a specific column of a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL typename genType::col_type column( - genType const& m, - length_t index); - - /// Set a specific column to a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL genType column( - genType const& m, - length_t index, - typename genType::col_type const& x); - - /// @} -}//namespace glm - -#include "matrix_access.inl" diff --git a/third_party/glm/gtc/matrix_access.inl b/third_party/glm/gtc/matrix_access.inl deleted file mode 100755 index 09fcc10..0000000 --- a/third_party/glm/gtc/matrix_access.inl +++ /dev/null @@ -1,62 +0,0 @@ -/// @ref gtc_matrix_access - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType row - ( - genType const& m, - length_t index, - typename genType::row_type const& x - ) - { - assert(index >= 0 && index < m[0].length()); - - genType Result = m; - for(length_t i = 0; i < m.length(); ++i) - Result[i][index] = x[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER typename genType::row_type row - ( - genType const& m, - length_t index - ) - { - assert(index >= 0 && index < m[0].length()); - - typename genType::row_type Result(0); - for(length_t i = 0; i < m.length(); ++i) - Result[i] = m[i][index]; - return Result; - } - - template - GLM_FUNC_QUALIFIER genType column - ( - genType const& m, - length_t index, - typename genType::col_type const& x - ) - { - assert(index >= 0 && index < m.length()); - - genType Result = m; - Result[index] = x; - return Result; - } - - template - GLM_FUNC_QUALIFIER typename genType::col_type column - ( - genType const& m, - length_t index - ) - { - assert(index >= 0 && index < m.length()); - - return m[index]; - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_integer.hpp b/third_party/glm/gtc/matrix_integer.hpp deleted file mode 100755 index 557a977..0000000 --- a/third_party/glm/gtc/matrix_integer.hpp +++ /dev/null @@ -1,487 +0,0 @@ -/// @ref gtc_matrix_integer -/// @file glm/gtc/matrix_integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_integer GLM_GTC_matrix_integer -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines a number of matrices with integer types. - -#pragma once - -// Dependency: -#include "../mat2x2.hpp" -#include "../mat2x3.hpp" -#include "../mat2x4.hpp" -#include "../mat3x2.hpp" -#include "../mat3x3.hpp" -#include "../mat3x4.hpp" -#include "../mat4x2.hpp" -#include "../mat4x3.hpp" -#include "../mat4x4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_matrix_integer - /// @{ - - /// High-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, highp> highp_imat2; - - /// High-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, highp> highp_imat3; - - /// High-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, highp> highp_imat4; - - /// High-qualifier signed integer 2x2 matrix. 
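The matrix_access helpers deleted above paper over GLM's column-major layout: column(m, i) is just m[i], while row(m, i) gathers element i of every column, and the three-argument overloads return a modified copy. A minimal sketch (pre-deletion headers, hypothetical main):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_access.hpp>
#include <cassert>

int main()
{
    glm::mat4 m(1.0f); // identity

    glm::vec4 c1 = glm::column(m, 1);
    glm::vec4 r2 = glm::row(m, 2);
    assert(c1 == glm::vec4(0, 1, 0, 0));
    assert(r2 == glm::vec4(0, 0, 1, 0));

    // Setting row 3 touches element 3 of every column.
    glm::mat4 m2 = glm::row(m, 3, glm::vec4(1, 2, 3, 4));
    assert(m2[0][3] == 1.0f && m2[3][3] == 4.0f);
    return 0;
}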
- /// @see gtc_matrix_integer - typedef mat<2, 2, int, highp> highp_imat2x2; - - /// High-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, highp> highp_imat2x3; - - /// High-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, highp> highp_imat2x4; - - /// High-qualifier signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, int, highp> highp_imat3x2; - - /// High-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, highp> highp_imat3x3; - - /// High-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, highp> highp_imat3x4; - - /// High-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, highp> highp_imat4x2; - - /// High-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, highp> highp_imat4x3; - - /// High-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, highp> highp_imat4x4; - - - /// Medium-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, mediump> mediump_imat2; - - /// Medium-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, mediump> mediump_imat3; - - /// Medium-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, mediump> mediump_imat4; - - - /// Medium-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, mediump> mediump_imat2x2; - - /// Medium-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, mediump> mediump_imat2x3; - - /// Medium-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, mediump> mediump_imat2x4; - - /// Medium-qualifier signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, int, mediump> mediump_imat3x2; - - /// Medium-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, mediump> mediump_imat3x3; - - /// Medium-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, mediump> mediump_imat3x4; - - /// Medium-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, mediump> mediump_imat4x2; - - /// Medium-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, mediump> mediump_imat4x3; - - /// Medium-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, mediump> mediump_imat4x4; - - - /// Low-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, lowp> lowp_imat2; - - /// Low-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, lowp> lowp_imat3; - - /// Low-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, lowp> lowp_imat4; - - - /// Low-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, lowp> lowp_imat2x2; - - /// Low-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, lowp> lowp_imat2x3; - - /// Low-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, lowp> lowp_imat2x4; - - /// Low-qualifier signed integer 3x2 matrix. 
- /// @see gtc_matrix_integer - typedef mat<3, 2, int, lowp> lowp_imat3x2; - - /// Low-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, lowp> lowp_imat3x3; - - /// Low-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, lowp> lowp_imat3x4; - - /// Low-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, lowp> lowp_imat4x2; - - /// Low-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, lowp> lowp_imat4x3; - - /// Low-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, lowp> lowp_imat4x4; - - - /// High-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, highp> highp_umat2; - - /// High-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, highp> highp_umat3; - - /// High-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, highp> highp_umat4; - - /// High-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, highp> highp_umat2x2; - - /// High-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, highp> highp_umat2x3; - - /// High-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, highp> highp_umat2x4; - - /// High-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, highp> highp_umat3x2; - - /// High-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, highp> highp_umat3x3; - - /// High-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, highp> highp_umat3x4; - - /// High-qualifier unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, uint, highp> highp_umat4x2; - - /// High-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, highp> highp_umat4x3; - - /// High-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, highp> highp_umat4x4; - - - /// Medium-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, mediump> mediump_umat2; - - /// Medium-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, mediump> mediump_umat3; - - /// Medium-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, mediump> mediump_umat4; - - - /// Medium-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, mediump> mediump_umat2x2; - - /// Medium-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, mediump> mediump_umat2x3; - - /// Medium-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, mediump> mediump_umat2x4; - - /// Medium-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, mediump> mediump_umat3x2; - - /// Medium-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, mediump> mediump_umat3x3; - - /// Medium-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, mediump> mediump_umat3x4; - - /// Medium-qualifier unsigned integer 4x2 matrix. 
- /// @see gtc_matrix_integer - typedef mat<4, 2, uint, mediump> mediump_umat4x2; - - /// Medium-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, mediump> mediump_umat4x3; - - /// Medium-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, mediump> mediump_umat4x4; - - - /// Low-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, lowp> lowp_umat2; - - /// Low-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, lowp> lowp_umat3; - - /// Low-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, lowp> lowp_umat4; - - - /// Low-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, lowp> lowp_umat2x2; - - /// Low-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, lowp> lowp_umat2x3; - - /// Low-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, lowp> lowp_umat2x4; - - /// Low-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, lowp> lowp_umat3x2; - - /// Low-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, lowp> lowp_umat3x3; - - /// Low-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, lowp> lowp_umat3x4; - - /// Low-qualifier unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, uint, lowp> lowp_umat4x2; - - /// Low-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, lowp> lowp_umat4x3; - - /// Low-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, lowp> lowp_umat4x4; - -#if(defined(GLM_PRECISION_HIGHP_INT)) - typedef highp_imat2 imat2; - typedef highp_imat3 imat3; - typedef highp_imat4 imat4; - typedef highp_imat2x2 imat2x2; - typedef highp_imat2x3 imat2x3; - typedef highp_imat2x4 imat2x4; - typedef highp_imat3x2 imat3x2; - typedef highp_imat3x3 imat3x3; - typedef highp_imat3x4 imat3x4; - typedef highp_imat4x2 imat4x2; - typedef highp_imat4x3 imat4x3; - typedef highp_imat4x4 imat4x4; -#elif(defined(GLM_PRECISION_LOWP_INT)) - typedef lowp_imat2 imat2; - typedef lowp_imat3 imat3; - typedef lowp_imat4 imat4; - typedef lowp_imat2x2 imat2x2; - typedef lowp_imat2x3 imat2x3; - typedef lowp_imat2x4 imat2x4; - typedef lowp_imat3x2 imat3x2; - typedef lowp_imat3x3 imat3x3; - typedef lowp_imat3x4 imat3x4; - typedef lowp_imat4x2 imat4x2; - typedef lowp_imat4x3 imat4x3; - typedef lowp_imat4x4 imat4x4; -#else //if(defined(GLM_PRECISION_MEDIUMP_INT)) - - /// Signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2 imat2; - - /// Signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3 imat3; - - /// Signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4 imat4; - - /// Signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x2 imat2x2; - - /// Signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x3 imat2x3; - - /// Signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x4 imat2x4; - - /// Signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3x2 imat3x2; - - /// Signed integer 3x3 matrix. 
- /// @see gtc_matrix_integer - typedef mediump_imat3x3 imat3x3; - - /// Signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3x4 imat3x4; - - /// Signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x2 imat4x2; - - /// Signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x3 imat4x3; - - /// Signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x4 imat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_HIGHP_UINT)) - typedef highp_umat2 umat2; - typedef highp_umat3 umat3; - typedef highp_umat4 umat4; - typedef highp_umat2x2 umat2x2; - typedef highp_umat2x3 umat2x3; - typedef highp_umat2x4 umat2x4; - typedef highp_umat3x2 umat3x2; - typedef highp_umat3x3 umat3x3; - typedef highp_umat3x4 umat3x4; - typedef highp_umat4x2 umat4x2; - typedef highp_umat4x3 umat4x3; - typedef highp_umat4x4 umat4x4; -#elif(defined(GLM_PRECISION_LOWP_UINT)) - typedef lowp_umat2 umat2; - typedef lowp_umat3 umat3; - typedef lowp_umat4 umat4; - typedef lowp_umat2x2 umat2x2; - typedef lowp_umat2x3 umat2x3; - typedef lowp_umat2x4 umat2x4; - typedef lowp_umat3x2 umat3x2; - typedef lowp_umat3x3 umat3x3; - typedef lowp_umat3x4 umat3x4; - typedef lowp_umat4x2 umat4x2; - typedef lowp_umat4x3 umat4x3; - typedef lowp_umat4x4 umat4x4; -#else //if(defined(GLM_PRECISION_MEDIUMP_UINT)) - - /// Unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2 umat2; - - /// Unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3 umat3; - - /// Unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4 umat4; - - /// Unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x2 umat2x2; - - /// Unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x3 umat2x3; - - /// Unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x4 umat2x4; - - /// Unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x2 umat3x2; - - /// Unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x3 umat3x3; - - /// Unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x4 umat3x4; - - /// Unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x2 umat4x2; - - /// Unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x3 umat4x3; - - /// Unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x4 umat4x4; -#endif//GLM_PRECISION - - /// @} -}//namespace glm diff --git a/third_party/glm/gtc/matrix_inverse.hpp b/third_party/glm/gtc/matrix_inverse.hpp deleted file mode 100755 index a1900ad..0000000 --- a/third_party/glm/gtc/matrix_inverse.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/// @ref gtc_matrix_inverse -/// @file glm/gtc/matrix_inverse.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_inverse GLM_GTC_matrix_inverse -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines additional matrix inverting functions. 
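[editorial aside, not part of the patch] The two GTC extensions whose removal ends above (matrix_access and matrix_integer) are thin convenience layers; for the record, a minimal usage sketch of what engine code loses with them. The function name imatDemo is illustrative only; the types and accessors are GLM's own, assuming GLM remains on the include path.

	#include <glm/gtc/matrix_integer.hpp>
	#include <glm/gtc/matrix_access.hpp>

	void imatDemo()
	{
		// imat3 resolves to mediump_imat3 unless GLM_PRECISION_HIGHP_INT or
		// GLM_PRECISION_LOWP_INT is defined before the include.
		glm::imat3 m(1);                      // signed-int 3x3 with 1s on the diagonal
		m = glm::column(m, 1, glm::ivec3(7)); // gtc_matrix_access: write column 1
		glm::ivec3 r0 = glm::row(m, 0);       // gtc_matrix_access: read row 0
		(void)r0;
	}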
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../matrix.hpp"
-#include "../mat2x2.hpp"
-#include "../mat3x3.hpp"
-#include "../mat4x4.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_matrix_inverse extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtc_matrix_inverse
-	/// @{
-
-	/// Fast matrix inverse for affine matrices.
-	///
-	/// @param m Input matrix to invert.
-	/// @tparam genType Square floating-point matrix: half, float or double. The inverse of a matrix based on half-qualifier floating-point values is highly inaccurate.
-	/// @see gtc_matrix_inverse
-	template<typename genType>
-	GLM_FUNC_DECL genType affineInverse(genType const& m);
-
-	/// Compute the inverse transpose of a matrix.
-	///
-	/// @param m Input matrix to invert and transpose.
-	/// @tparam genType Square floating-point matrix: half, float or double. The inverse of a matrix based on half-qualifier floating-point values is highly inaccurate.
-	/// @see gtc_matrix_inverse
-	template<typename genType>
-	GLM_FUNC_DECL genType inverseTranspose(genType const& m);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_inverse.inl"
diff --git a/third_party/glm/gtc/matrix_inverse.inl b/third_party/glm/gtc/matrix_inverse.inl
deleted file mode 100755
index c004b9e..0000000
--- a/third_party/glm/gtc/matrix_inverse.inl
+++ /dev/null
@@ -1,118 +0,0 @@
-/// @ref gtc_matrix_inverse
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> affineInverse(mat<3, 3, T, Q> const& m)
-	{
-		mat<2, 2, T, Q> const Inv(inverse(mat<2, 2, T, Q>(m)));
-
-		return mat<3, 3, T, Q>(
-			vec<3, T, Q>(Inv[0], static_cast<T>(0)),
-			vec<3, T, Q>(Inv[1], static_cast<T>(0)),
-			vec<3, T, Q>(-Inv * vec<2, T, Q>(m[2]), static_cast<T>(1)));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> affineInverse(mat<4, 4, T, Q> const& m)
-	{
-		mat<3, 3, T, Q> const Inv(inverse(mat<3, 3, T, Q>(m)));
-
-		return mat<4, 4, T, Q>(
-			vec<4, T, Q>(Inv[0], static_cast<T>(0)),
-			vec<4, T, Q>(Inv[1], static_cast<T>(0)),
-			vec<4, T, Q>(Inv[2], static_cast<T>(0)),
-			vec<4, T, Q>(-Inv * vec<3, T, Q>(m[3]), static_cast<T>(1)));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<2, 2, T, Q> inverseTranspose(mat<2, 2, T, Q> const& m)
-	{
-		T Determinant = m[0][0] * m[1][1] - m[1][0] * m[0][1];
-
-		mat<2, 2, T, Q> Inverse(
-			+ m[1][1] / Determinant,
-			- m[0][1] / Determinant,
-			- m[1][0] / Determinant,
-			+ m[0][0] / Determinant);
-
-		return Inverse;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> inverseTranspose(mat<3, 3, T, Q> const& m)
-	{
-		T Determinant =
-			+ m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
-			- m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
-			+ m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
-
-		mat<3, 3, T, Q> Inverse;
-		Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]);
-		Inverse[0][1] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]);
-		Inverse[0][2] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]);
-		Inverse[1][0] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]);
-		Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]);
-		Inverse[1][2] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]);
-		Inverse[2][0] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]);
-		Inverse[2][1] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]);
-		Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]);
-		Inverse /= Determinant;
-
-		return Inverse;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> inverseTranspose(mat<4, 4, T, Q> const& m)
-	{
-		T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
-		T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
-		T
SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - T SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - T SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - T SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - T SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - T SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - T SubFactor11 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - T SubFactor12 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - T SubFactor13 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - T SubFactor14 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - T SubFactor15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - T SubFactor16 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - T SubFactor17 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - mat<4, 4, T, Q> Inverse; - Inverse[0][0] = + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02); - Inverse[0][1] = - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04); - Inverse[0][2] = + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05); - Inverse[0][3] = - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05); - - Inverse[1][0] = - (m[0][1] * SubFactor00 - m[0][2] * SubFactor01 + m[0][3] * SubFactor02); - Inverse[1][1] = + (m[0][0] * SubFactor00 - m[0][2] * SubFactor03 + m[0][3] * SubFactor04); - Inverse[1][2] = - (m[0][0] * SubFactor01 - m[0][1] * SubFactor03 + m[0][3] * SubFactor05); - Inverse[1][3] = + (m[0][0] * SubFactor02 - m[0][1] * SubFactor04 + m[0][2] * SubFactor05); - - Inverse[2][0] = + (m[0][1] * SubFactor06 - m[0][2] * SubFactor07 + m[0][3] * SubFactor08); - Inverse[2][1] = - (m[0][0] * SubFactor06 - m[0][2] * SubFactor09 + m[0][3] * SubFactor10); - Inverse[2][2] = + (m[0][0] * SubFactor07 - m[0][1] * SubFactor09 + m[0][3] * SubFactor11); - Inverse[2][3] = - (m[0][0] * SubFactor08 - m[0][1] * SubFactor10 + m[0][2] * SubFactor11); - - Inverse[3][0] = - (m[0][1] * SubFactor12 - m[0][2] * SubFactor13 + m[0][3] * SubFactor14); - Inverse[3][1] = + (m[0][0] * SubFactor12 - m[0][2] * SubFactor15 + m[0][3] * SubFactor16); - Inverse[3][2] = - (m[0][0] * SubFactor13 - m[0][1] * SubFactor15 + m[0][3] * SubFactor17); - Inverse[3][3] = + (m[0][0] * SubFactor14 - m[0][1] * SubFactor16 + m[0][2] * SubFactor17); - - T Determinant = - + m[0][0] * Inverse[0][0] - + m[0][1] * Inverse[0][1] - + m[0][2] * Inverse[0][2] - + m[0][3] * Inverse[0][3]; - - Inverse /= Determinant; - - return Inverse; - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_transform.hpp b/third_party/glm/gtc/matrix_transform.hpp deleted file mode 100755 index 612418f..0000000 --- a/third_party/glm/gtc/matrix_transform.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtc_matrix_transform -/// @file glm/gtc/matrix_transform.hpp -/// -/// @see core (dependence) -/// @see gtx_transform -/// @see gtx_transform2 -/// -/// @defgroup gtc_matrix_transform GLM_GTC_matrix_transform -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines functions that generate common transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. 
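[editorial aside, not part of the patch] A short sketch of the two entry points implemented just above, since any engine code that relied on them must now be rewritten: affineInverse exploits the affine structure (invert the upper-left block, then the translation column) and is cheaper than a general glm::inverse, while inverseTranspose is the usual normal-matrix construction. Hypothetical demo function, assuming GLM remains available:

	#include <glm/glm.hpp>
	#include <glm/gtc/matrix_inverse.hpp>
	#include <glm/gtc/matrix_transform.hpp>

	void inverseDemo()
	{
		glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f, 2.0f, 3.0f));
		glm::mat4 fastInv = glm::affineInverse(model);    // valid because model is affine
		glm::mat4 normalM = glm::inverseTranspose(model); // transform for normal vectors
		(void)fastInv; (void)normalM;
	}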
The OpenGL compatibility
-/// specification defines the particular layout of this eye space.
-
-#pragma once
-
-// Dependencies
-#include "../mat4x4.hpp"
-#include "../vec2.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-#include "../ext/matrix_projection.hpp"
-#include "../ext/matrix_clip_space.hpp"
-#include "../ext/matrix_transform.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_matrix_transform extension included")
-#endif
-
-#include "matrix_transform.inl"
diff --git a/third_party/glm/gtc/matrix_transform.inl b/third_party/glm/gtc/matrix_transform.inl
deleted file mode 100755
index 15b46bc..0000000
--- a/third_party/glm/gtc/matrix_transform.inl
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "../geometric.hpp"
-#include "../trigonometric.hpp"
-#include "../matrix.hpp"
diff --git a/third_party/glm/gtc/noise.hpp b/third_party/glm/gtc/noise.hpp
deleted file mode 100755
index ab1772e..0000000
--- a/third_party/glm/gtc/noise.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/// @ref gtc_noise
-/// @file glm/gtc/noise.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtc_noise GLM_GTC_noise
-/// @ingroup gtc
-///
-/// Include <glm/gtc/noise.hpp> to use the features of this extension.
-///
-/// Defines 2D, 3D and 4D procedural noise functions.
-/// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise":
-/// https://github.com/ashima/webgl-noise
-/// Following Stefan Gustavson's paper "Simplex noise demystified":
-/// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-#include "../detail/_noise.hpp"
-#include "../geometric.hpp"
-#include "../common.hpp"
-#include "../vector_relational.hpp"
-#include "../vec2.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_noise extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtc_noise
-	/// @{
-
-	/// Classic Perlin noise.
-	/// @see gtc_noise
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL T perlin(
-		vec<L, T, Q> const& p);
-
-	/// Periodic Perlin noise.
-	/// @see gtc_noise
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL T perlin(
-		vec<L, T, Q> const& p,
-		vec<L, T, Q> const& rep);
-
-	/// Simplex noise.
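[editorial aside, not part of the patch; the simplex declaration continues just below] Usage sketch for the noise API declared here; the functions return values roughly in [-1, 1]. The demo name is illustrative only, assuming GLM stays on the include path:

	#include <glm/gtc/noise.hpp>

	float noiseDemo()
	{
		glm::vec2 p(0.25f, 0.75f);
		float a = glm::perlin(p);                   // classic Perlin
		float b = glm::perlin(p, glm::vec2(4.0f));  // periodic variant, period 4 in x and y
		float c = glm::simplex(glm::vec3(p, 0.5f)); // simplex noise
		return a + b + c;
	}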
- /// @see gtc_noise - template - GLM_FUNC_DECL T simplex( - vec const& p); - - /// @} -}//namespace glm - -#include "noise.inl" diff --git a/third_party/glm/gtc/noise.inl b/third_party/glm/gtc/noise.inl deleted file mode 100755 index 30d0b27..0000000 --- a/third_party/glm/gtc/noise.inl +++ /dev/null @@ -1,807 +0,0 @@ -/// @ref gtc_noise -/// -// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": -// https://github.com/ashima/webgl-noise -// Following Stefan Gustavson's paper "Simplex noise demystified": -// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf - -namespace glm{ -namespace gtc -{ - template - GLM_FUNC_QUALIFIER vec<4, T, Q> grad4(T const& j, vec<4, T, Q> const& ip) - { - vec<3, T, Q> pXYZ = floor(fract(vec<3, T, Q>(j) * vec<3, T, Q>(ip)) * T(7)) * ip[2] - T(1); - T pW = static_cast(1.5) - dot(abs(pXYZ), vec<3, T, Q>(1)); - vec<4, T, Q> s = vec<4, T, Q>(lessThan(vec<4, T, Q>(pXYZ, pW), vec<4, T, Q>(0.0))); - pXYZ = pXYZ + (vec<3, T, Q>(s) * T(2) - T(1)) * s.w; - return vec<4, T, Q>(pXYZ, pW); - } -}//namespace gtc - - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position) - { - vec<4, T, Q> Pi = glm::floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - vec<4, T, Q> Pf = glm::fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation - vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); - vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); - vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); - vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); - - vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); - - vec<4, T, Q> gx = static_cast(2) * glm::fract(i / T(41)) - T(1); - vec<4, T, Q> gy = glm::abs(gx) - T(0.5); - vec<4, T, Q> tx = glm::floor(gx + T(0.5)); - gx = gx - tx; - - vec<2, T, Q> g00(gx.x, gy.x); - vec<2, T, Q> g10(gx.y, gy.y); - vec<2, T, Q> g01(gx.z, gy.z); - vec<2, T, Q> g11(gx.w, gy.w); - - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); - g00 *= norm.x; - g01 *= norm.y; - g10 *= norm.z; - g11 *= norm.w; - - T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); - T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); - T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); - T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); - - vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); - vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); - T n_xy = mix(n_x.x, n_x.y, fade_xy.y); - return T(2.3) * n_xy; - } - - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position) - { - vec<3, T, Q> Pi0 = floor(Position); // Integer part for indexing - vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = detail::mod289(Pi0); - Pi1 = detail::mod289(Pi1); - vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(vec<2, T, Q>(Pi0.y), vec<2, T, Q>(Pi1.y)); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 * T(1.0 / 7.0); - vec<4, T, Q> gy0 = fract(floor(gx0) * T(1.0 / 7.0)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = 
vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); - gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); - gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 * T(1.0 / 7.0); - vec<4, T, Q> gy1 = fract(floor(gx1) * T(1.0 / 7.0)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z); - vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = detail::fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - /* - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& P) - { - vec<3, T, Q> Pi0 = floor(P); // Integer part for indexing - vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = mod(Pi0, T(289)); - Pi1 = mod(Pi1, T(289)); - vec<3, T, Q> Pf0 = fract(P); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = permute(permute(ix) + iy); - vec<4, T, Q> ixy0 = permute(ixy + iz0); - vec<4, T, Q> ixy1 = permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 / T(7); - vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); - gx0 -= sz0 * (step(0.0, gx0) - T(0.5)); - gy0 -= sz0 * (step(0.0, gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 / T(7); - vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011(gx1.z, 
gy1.z, gz1.z); - vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix( - vec<2, T, Q>(n_z.x, n_z.y), - vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - */ - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position) - { - vec<4, T, Q> Pi0 = floor(Position); // Integer part for indexing - vec<4, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = mod(Pi0, vec<4, T, Q>(289)); - Pi1 = mod(Pi1, vec<4, T, Q>(289)); - vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - vec<4, T, Q> iw0(Pi0.w); - vec<4, T, Q> iw1(Pi1.w); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); - vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); - vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); - vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); - - vec<4, T, Q> gx00 = ixy00 / T(7); - vec<4, T, Q> gy00 = floor(gx00) / T(7); - vec<4, T, Q> gz00 = floor(gy00) / T(6); - gx00 = fract(gx00) - T(0.5); - gy00 = fract(gy00) - T(0.5); - gz00 = fract(gz00) - T(0.5); - vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); - vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0.0)); - gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); - gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); - - vec<4, T, Q> gx01 = ixy01 / T(7); - vec<4, T, Q> gy01 = floor(gx01) / T(7); - vec<4, T, Q> gz01 = floor(gy01) / T(6); - gx01 = fract(gx01) - T(0.5); - gy01 = fract(gy01) - T(0.5); - gz01 = fract(gz01) - T(0.5); - vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); - vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); - gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); - gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); - - vec<4, T, Q> gx10 = ixy10 / T(7); - vec<4, T, Q> gy10 = floor(gx10) / T(7); - vec<4, T, Q> gz10 = floor(gy10) / T(6); - gx10 = fract(gx10) - T(0.5); - gy10 = fract(gy10) - T(0.5); - gz10 = fract(gz10) - T(0.5); - vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); - vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0)); - gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); - gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); - - vec<4, T, Q> gx11 = ixy11 / T(7); - vec<4, T, Q> gy11 
= floor(gx11) / T(7); - vec<4, T, Q> gz11 = floor(gy11) / T(6); - gx11 = fract(gx11) - T(0.5); - gy11 = fract(gy11) - T(0.5); - gz11 = fract(gz11) - T(0.5); - vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); - vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(0.0)); - gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); - gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); - - vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); - vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); - vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); - vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); - vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); - vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); - vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); - vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); - vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); - vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); - vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); - vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); - vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); - vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); - vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); - vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); - - vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); - g0000 *= norm00.x; - g0100 *= norm00.y; - g1000 *= norm00.z; - g1100 *= norm00.w; - - vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); - g0001 *= norm01.x; - g0101 *= norm01.y; - g1001 *= norm01.z; - g1101 *= norm01.w; - - vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); - g0010 *= norm10.x; - g0110 *= norm10.y; - g1010 *= norm10.z; - g1110 *= norm10.w; - - vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); - g0011 *= norm11.x; - g0111 *= norm11.y; - g1011 *= norm11.z; - g1111 *= norm11.w; - - T n0000 = dot(g0000, Pf0); - T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); - T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); - T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); - T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); - T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); - T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); - T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); - T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); - T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); - T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); - T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); - T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); - T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); - T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); - T n1111 = dot(g1111, Pf1); - - vec<4, T, Q> fade_xyzw = detail::fade(Pf0); - vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); - vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); - vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); - vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, 
n_zw.w), fade_xyzw.y); - T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); - return T(2.2) * n_xyzw; - } - - // Classic Perlin noise, periodic variant - template - GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position, vec<2, T, Q> const& rep) - { - vec<4, T, Q> Pi = floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - vec<4, T, Q> Pf = fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - Pi = mod(Pi, vec<4, T, Q>(rep.x, rep.y, rep.x, rep.y)); // To create noise with explicit period - Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation - vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); - vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); - vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); - vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); - - vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); - - vec<4, T, Q> gx = static_cast(2) * fract(i / T(41)) - T(1); - vec<4, T, Q> gy = abs(gx) - T(0.5); - vec<4, T, Q> tx = floor(gx + T(0.5)); - gx = gx - tx; - - vec<2, T, Q> g00(gx.x, gy.x); - vec<2, T, Q> g10(gx.y, gy.y); - vec<2, T, Q> g01(gx.z, gy.z); - vec<2, T, Q> g11(gx.w, gy.w); - - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); - g00 *= norm.x; - g01 *= norm.y; - g10 *= norm.z; - g11 *= norm.w; - - T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); - T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); - T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); - T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); - - vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); - vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); - T n_xy = mix(n_x.x, n_x.y, fade_xy.y); - return T(2.3) * n_xy; - } - - // Classic Perlin noise, periodic variant - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position, vec<3, T, Q> const& rep) - { - vec<3, T, Q> Pi0 = mod(floor(Position), rep); // Integer part, modulo period - vec<3, T, Q> Pi1 = mod(Pi0 + vec<3, T, Q>(T(1)), rep); // Integer part + 1, mod period - Pi0 = mod(Pi0, vec<3, T, Q>(289)); - Pi1 = mod(Pi1, vec<3, T, Q>(289)); - vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - vec<3, T, Q>(T(1)); // Fractional part - 1.0 - vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 / T(7); - vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0)); - gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); - gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 / T(7); - vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(T(0))); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000 = vec<3, T, Q>(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100 = vec<3, T, Q>(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010 = vec<3, T, Q>(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110 = vec<3, T, Q>(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001 = vec<3, 
T, Q>(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101 = vec<3, T, Q>(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011 = vec<3, T, Q>(gx1.z, gy1.z, gz1.z); - vec<3, T, Q> g111 = vec<3, T, Q>(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = detail::fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - - // Classic Perlin noise, periodic version - template - GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position, vec<4, T, Q> const& rep) - { - vec<4, T, Q> Pi0 = mod(floor(Position), rep); // Integer part modulo rep - vec<4, T, Q> Pi1 = mod(Pi0 + T(1), rep); // Integer part + 1 mod rep - vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - vec<4, T, Q> iw0(Pi0.w); - vec<4, T, Q> iw1(Pi1.w); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); - vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); - vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); - vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); - - vec<4, T, Q> gx00 = ixy00 / T(7); - vec<4, T, Q> gy00 = floor(gx00) / T(7); - vec<4, T, Q> gz00 = floor(gy00) / T(6); - gx00 = fract(gx00) - T(0.5); - gy00 = fract(gy00) - T(0.5); - gz00 = fract(gz00) - T(0.5); - vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); - vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0)); - gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); - gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); - - vec<4, T, Q> gx01 = ixy01 / T(7); - vec<4, T, Q> gy01 = floor(gx01) / T(7); - vec<4, T, Q> gz01 = floor(gy01) / T(6); - gx01 = fract(gx01) - T(0.5); - gy01 = fract(gy01) - T(0.5); - gz01 = fract(gz01) - T(0.5); - vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); - vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); - gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); - gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); - - vec<4, T, Q> gx10 = ixy10 / T(7); - vec<4, T, Q> gy10 = floor(gx10) / T(7); - vec<4, T, Q> gz10 = floor(gy10) / T(6); - gx10 = fract(gx10) - T(0.5); - gy10 = fract(gy10) - T(0.5); - gz10 = fract(gz10) - T(0.5); - vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); - vec<4, T, Q> sw10 
= step(gw10, vec<4, T, Q>(0.0)); - gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); - gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); - - vec<4, T, Q> gx11 = ixy11 / T(7); - vec<4, T, Q> gy11 = floor(gx11) / T(7); - vec<4, T, Q> gz11 = floor(gy11) / T(6); - gx11 = fract(gx11) - T(0.5); - gy11 = fract(gy11) - T(0.5); - gz11 = fract(gz11) - T(0.5); - vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); - vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(T(0))); - gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); - gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); - - vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); - vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); - vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); - vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); - vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); - vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); - vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); - vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); - vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); - vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); - vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); - vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); - vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); - vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); - vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); - vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); - - vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); - g0000 *= norm00.x; - g0100 *= norm00.y; - g1000 *= norm00.z; - g1100 *= norm00.w; - - vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); - g0001 *= norm01.x; - g0101 *= norm01.y; - g1001 *= norm01.z; - g1101 *= norm01.w; - - vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); - g0010 *= norm10.x; - g0110 *= norm10.y; - g1010 *= norm10.z; - g1110 *= norm10.w; - - vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); - g0011 *= norm11.x; - g0111 *= norm11.y; - g1011 *= norm11.z; - g1111 *= norm11.w; - - T n0000 = dot(g0000, Pf0); - T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); - T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); - T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); - T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); - T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); - T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); - T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); - T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); - T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); - T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); - T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); - T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); - T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); - T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); - T n1111 = dot(g1111, Pf1); - - vec<4, T, Q> fade_xyzw = detail::fade(Pf0); - vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); - vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, 
T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); - vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); - vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y); - T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); - return T(2.2) * n_xyzw; - } - - template - GLM_FUNC_QUALIFIER T simplex(glm::vec<2, T, Q> const& v) - { - vec<4, T, Q> const C = vec<4, T, Q>( - T( 0.211324865405187), // (3.0 - sqrt(3.0)) / 6.0 - T( 0.366025403784439), // 0.5 * (sqrt(3.0) - 1.0) - T(-0.577350269189626), // -1.0 + 2.0 * C.x - T( 0.024390243902439)); // 1.0 / 41.0 - - // First corner - vec<2, T, Q> i = floor(v + dot(v, vec<2, T, Q>(C[1]))); - vec<2, T, Q> x0 = v - i + dot(i, vec<2, T, Q>(C[0])); - - // Other corners - //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0 - //i1.y = 1.0 - i1.x; - vec<2, T, Q> i1 = (x0.x > x0.y) ? vec<2, T, Q>(1, 0) : vec<2, T, Q>(0, 1); - // x0 = x0 - 0.0 + 0.0 * C.xx ; - // x1 = x0 - i1 + 1.0 * C.xx ; - // x2 = x0 - 1.0 + 2.0 * C.xx ; - vec<4, T, Q> x12 = vec<4, T, Q>(x0.x, x0.y, x0.x, x0.y) + vec<4, T, Q>(C.x, C.x, C.z, C.z); - x12 = vec<4, T, Q>(vec<2, T, Q>(x12) - i1, x12.z, x12.w); - - // Permutations - i = mod(i, vec<2, T, Q>(289)); // Avoid truncation effects in permutation - vec<3, T, Q> p = detail::permute( - detail::permute(i.y + vec<3, T, Q>(T(0), i1.y, T(1))) - + i.x + vec<3, T, Q>(T(0), i1.x, T(1))); - - vec<3, T, Q> m = max(vec<3, T, Q>(0.5) - vec<3, T, Q>( - dot(x0, x0), - dot(vec<2, T, Q>(x12.x, x12.y), vec<2, T, Q>(x12.x, x12.y)), - dot(vec<2, T, Q>(x12.z, x12.w), vec<2, T, Q>(x12.z, x12.w))), vec<3, T, Q>(0)); - m = m * m ; - m = m * m ; - - // Gradients: 41 points uniformly over a line, mapped onto a diamond. - // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287) - - vec<3, T, Q> x = static_cast(2) * fract(p * C.w) - T(1); - vec<3, T, Q> h = abs(x) - T(0.5); - vec<3, T, Q> ox = floor(x + T(0.5)); - vec<3, T, Q> a0 = x - ox; - - // Normalise gradients implicitly by scaling m - // Inlined for speed: m *= taylorInvSqrt( a0*a0 + h*h ); - m *= static_cast(1.79284291400159) - T(0.85373472095314) * (a0 * a0 + h * h); - - // Compute final noise value at P - vec<3, T, Q> g; - g.x = a0.x * x0.x + h.x * x0.y; - //g.yz = a0.yz * x12.xz + h.yz * x12.yw; - g.y = a0.y * x12.x + h.y * x12.y; - g.z = a0.z * x12.z + h.z * x12.w; - return T(130) * dot(m, g); - } - - template - GLM_FUNC_QUALIFIER T simplex(vec<3, T, Q> const& v) - { - vec<2, T, Q> const C(1.0 / 6.0, 1.0 / 3.0); - vec<4, T, Q> const D(0.0, 0.5, 1.0, 2.0); - - // First corner - vec<3, T, Q> i(floor(v + dot(v, vec<3, T, Q>(C.y)))); - vec<3, T, Q> x0(v - i + dot(i, vec<3, T, Q>(C.x))); - - // Other corners - vec<3, T, Q> g(step(vec<3, T, Q>(x0.y, x0.z, x0.x), x0)); - vec<3, T, Q> l(T(1) - g); - vec<3, T, Q> i1(min(g, vec<3, T, Q>(l.z, l.x, l.y))); - vec<3, T, Q> i2(max(g, vec<3, T, Q>(l.z, l.x, l.y))); - - // x0 = x0 - 0.0 + 0.0 * C.xxx; - // x1 = x0 - i1 + 1.0 * C.xxx; - // x2 = x0 - i2 + 2.0 * C.xxx; - // x3 = x0 - 1.0 + 3.0 * C.xxx; - vec<3, T, Q> x1(x0 - i1 + C.x); - vec<3, T, Q> x2(x0 - i2 + C.y); // 2.0*C.x = 1/3 = C.y - vec<3, T, Q> x3(x0 - D.y); // -1.0+3.0*C.x = -0.5 = -D.y - - // Permutations - i = detail::mod289(i); - vec<4, T, Q> p(detail::permute(detail::permute(detail::permute( - i.z + vec<4, T, Q>(T(0), i1.z, i2.z, T(1))) + - i.y + vec<4, T, Q>(T(0), i1.y, i2.y, T(1))) + - i.x + vec<4, T, Q>(T(0), i1.x, i2.x, T(1)))); - - // Gradients: 7x7 points over a square, mapped onto an octahedron. 
- // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294) - T n_ = static_cast(0.142857142857); // 1.0/7.0 - vec<3, T, Q> ns(n_ * vec<3, T, Q>(D.w, D.y, D.z) - vec<3, T, Q>(D.x, D.z, D.x)); - - vec<4, T, Q> j(p - T(49) * floor(p * ns.z * ns.z)); // mod(p,7*7) - - vec<4, T, Q> x_(floor(j * ns.z)); - vec<4, T, Q> y_(floor(j - T(7) * x_)); // mod(j,N) - - vec<4, T, Q> x(x_ * ns.x + ns.y); - vec<4, T, Q> y(y_ * ns.x + ns.y); - vec<4, T, Q> h(T(1) - abs(x) - abs(y)); - - vec<4, T, Q> b0(x.x, x.y, y.x, y.y); - vec<4, T, Q> b1(x.z, x.w, y.z, y.w); - - // vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0; - // vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0; - vec<4, T, Q> s0(floor(b0) * T(2) + T(1)); - vec<4, T, Q> s1(floor(b1) * T(2) + T(1)); - vec<4, T, Q> sh(-step(h, vec<4, T, Q>(0.0))); - - vec<4, T, Q> a0 = vec<4, T, Q>(b0.x, b0.z, b0.y, b0.w) + vec<4, T, Q>(s0.x, s0.z, s0.y, s0.w) * vec<4, T, Q>(sh.x, sh.x, sh.y, sh.y); - vec<4, T, Q> a1 = vec<4, T, Q>(b1.x, b1.z, b1.y, b1.w) + vec<4, T, Q>(s1.x, s1.z, s1.y, s1.w) * vec<4, T, Q>(sh.z, sh.z, sh.w, sh.w); - - vec<3, T, Q> p0(a0.x, a0.y, h.x); - vec<3, T, Q> p1(a0.z, a0.w, h.y); - vec<3, T, Q> p2(a1.x, a1.y, h.z); - vec<3, T, Q> p3(a1.z, a1.w, h.w); - - // Normalise gradients - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); - p0 *= norm.x; - p1 *= norm.y; - p2 *= norm.z; - p3 *= norm.w; - - // Mix final noise value - vec<4, T, Q> m = max(T(0.6) - vec<4, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), vec<4, T, Q>(0)); - m = m * m; - return T(42) * dot(m * m, vec<4, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3))); - } - - template - GLM_FUNC_QUALIFIER T simplex(vec<4, T, Q> const& v) - { - vec<4, T, Q> const C( - 0.138196601125011, // (5 - sqrt(5))/20 G4 - 0.276393202250021, // 2 * G4 - 0.414589803375032, // 3 * G4 - -0.447213595499958); // -1 + 4 * G4 - - // (sqrt(5) - 1)/4 = F4, used once below - T const F4 = static_cast(0.309016994374947451); - - // First corner - vec<4, T, Q> i = floor(v + dot(v, vec<4, T, Q>(F4))); - vec<4, T, Q> x0 = v - i + dot(i, vec<4, T, Q>(C.x)); - - // Other corners - - // Rank sorting originally contributed by Bill Licea-Kane, AMD (formerly ATI) - vec<4, T, Q> i0; - vec<3, T, Q> isX = step(vec<3, T, Q>(x0.y, x0.z, x0.w), vec<3, T, Q>(x0.x)); - vec<3, T, Q> isYZ = step(vec<3, T, Q>(x0.z, x0.w, x0.w), vec<3, T, Q>(x0.y, x0.y, x0.z)); - // i0.x = dot(isX, vec3(1.0)); - //i0.x = isX.x + isX.y + isX.z; - //i0.yzw = static_cast(1) - isX; - i0 = vec<4, T, Q>(isX.x + isX.y + isX.z, T(1) - isX); - // i0.y += dot(isYZ.xy, vec2(1.0)); - i0.y += isYZ.x + isYZ.y; - //i0.zw += 1.0 - vec<2, T, Q>(isYZ.x, isYZ.y); - i0.z += static_cast(1) - isYZ.x; - i0.w += static_cast(1) - isYZ.y; - i0.z += isYZ.z; - i0.w += static_cast(1) - isYZ.z; - - // i0 now contains the unique values 0,1,2,3 in each channel - vec<4, T, Q> i3 = clamp(i0, T(0), T(1)); - vec<4, T, Q> i2 = clamp(i0 - T(1), T(0), T(1)); - vec<4, T, Q> i1 = clamp(i0 - T(2), T(0), T(1)); - - // x0 = x0 - 0.0 + 0.0 * C.xxxx - // x1 = x0 - i1 + 0.0 * C.xxxx - // x2 = x0 - i2 + 0.0 * C.xxxx - // x3 = x0 - i3 + 0.0 * C.xxxx - // x4 = x0 - 1.0 + 4.0 * C.xxxx - vec<4, T, Q> x1 = x0 - i1 + C.x; - vec<4, T, Q> x2 = x0 - i2 + C.y; - vec<4, T, Q> x3 = x0 - i3 + C.z; - vec<4, T, Q> x4 = x0 + C.w; - - // Permutations - i = mod(i, vec<4, T, Q>(289)); - T j0 = detail::permute(detail::permute(detail::permute(detail::permute(i.w) + i.z) + i.y) + i.x); - vec<4, T, Q> j1 = 
detail::permute(detail::permute(detail::permute(detail::permute( - i.w + vec<4, T, Q>(i1.w, i2.w, i3.w, T(1))) + - i.z + vec<4, T, Q>(i1.z, i2.z, i3.z, T(1))) + - i.y + vec<4, T, Q>(i1.y, i2.y, i3.y, T(1))) + - i.x + vec<4, T, Q>(i1.x, i2.x, i3.x, T(1))); - - // Gradients: 7x7x6 points over a cube, mapped onto a 4-cross polytope - // 7*7*6 = 294, which is close to the ring size 17*17 = 289. - vec<4, T, Q> ip = vec<4, T, Q>(T(1) / T(294), T(1) / T(49), T(1) / T(7), T(0)); - - vec<4, T, Q> p0 = gtc::grad4(j0, ip); - vec<4, T, Q> p1 = gtc::grad4(j1.x, ip); - vec<4, T, Q> p2 = gtc::grad4(j1.y, ip); - vec<4, T, Q> p3 = gtc::grad4(j1.z, ip); - vec<4, T, Q> p4 = gtc::grad4(j1.w, ip); - - // Normalise gradients - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); - p0 *= norm.x; - p1 *= norm.y; - p2 *= norm.z; - p3 *= norm.w; - p4 *= detail::taylorInvSqrt(dot(p4, p4)); - - // Mix contributions from the five corners - vec<3, T, Q> m0 = max(T(0.6) - vec<3, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2)), vec<3, T, Q>(0)); - vec<2, T, Q> m1 = max(T(0.6) - vec<2, T, Q>(dot(x3, x3), dot(x4, x4) ), vec<2, T, Q>(0)); - m0 = m0 * m0; - m1 = m1 * m1; - return T(49) * - (dot(m0 * m0, vec<3, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2))) + - dot(m1 * m1, vec<2, T, Q>(dot(p3, x3), dot(p4, x4)))); - } -}//namespace glm diff --git a/third_party/glm/gtc/packing.hpp b/third_party/glm/gtc/packing.hpp deleted file mode 100755 index 7c64aba..0000000 --- a/third_party/glm/gtc/packing.hpp +++ /dev/null @@ -1,728 +0,0 @@ -/// @ref gtc_packing -/// @file glm/gtc/packing.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_packing GLM_GTC_packing -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// This extension provides a set of function to convert vertors to packed -/// formats. - -#pragma once - -// Dependency: -#include "type_precision.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_packing extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_packing - /// @{ - - /// First, converts the normalized floating-point value v into a 8-bit integer value. - /// Then, the results are packed into the returned 8-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm1x8: round(clamp(c, 0, +1) * 255.0) - /// - /// @see gtc_packing - /// @see uint16 packUnorm2x8(vec2 const& v) - /// @see uint32 packUnorm4x8(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint8 packUnorm1x8(float v); - - /// Convert a single 8-bit integer to a normalized floating-point value. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm4x8: f / 255.0 - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x8(uint16 p) - /// @see vec4 unpackUnorm4x8(uint32 p) - /// @see GLSL unpackUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackUnorm1x8(uint8 p); - - /// First, converts each component of the normalized floating-point value v into 8-bit integer values. - /// Then, the results are packed into the returned 16-bit unsigned integer. 
- /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm2x8: round(clamp(c, 0, +1) * 255.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint8 packUnorm1x8(float const& v) - /// @see uint32 packUnorm4x8(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packUnorm2x8(vec2 const& v); - - /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit unsigned integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm4x8: f / 255.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackUnorm1x8(uint8 v) - /// @see vec4 unpackUnorm4x8(uint32 p) - /// @see GLSL unpackUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackUnorm2x8(uint16 p); - - /// First, converts the normalized floating-point value v into 8-bit integer value. - /// Then, the results are packed into the returned 8-bit unsigned integer. - /// - /// The conversion to fixed point is done as follows: - /// packSnorm1x8: round(clamp(s, -1, +1) * 127.0) - /// - /// @see gtc_packing - /// @see uint16 packSnorm2x8(vec2 const& v) - /// @see uint32 packSnorm4x8(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint8 packSnorm1x8(float s); - - /// First, unpacks a single 8-bit unsigned integer p into a single 8-bit signed integers. - /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm1x8: clamp(f / 127.0, -1, +1) - /// - /// @see gtc_packing - /// @see vec2 unpackSnorm2x8(uint16 p) - /// @see vec4 unpackSnorm4x8(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackSnorm1x8(uint8 p); - - /// First, converts each component of the normalized floating-point value v into 8-bit integer values. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm2x8: round(clamp(c, -1, +1) * 127.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint8 packSnorm1x8(float const& v) - /// @see uint32 packSnorm4x8(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packSnorm2x8(vec2 const& v); - - /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit signed integers. 
- /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm2x8: clamp(f / 127.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackSnorm1x8(uint8 p) - /// @see vec4 unpackSnorm4x8(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackSnorm2x8(uint16 p); - - /// First, converts the normalized floating-point value v into a 16-bit integer value. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm1x16: round(clamp(c, 0, +1) * 65535.0) - /// - /// @see gtc_packing - /// @see uint16 packSnorm1x16(float const& v) - /// @see uint64 packSnorm4x16(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packUnorm1x16(float v); - - /// First, unpacks a single 16-bit unsigned integer p into a of 16-bit unsigned integers. - /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm1x16: f / 65535.0 - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x16(uint32 p) - /// @see vec4 unpackUnorm4x16(uint64 p) - /// @see GLSL unpackUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackUnorm1x16(uint16 p); - - /// First, converts each component of the normalized floating-point value v into 16-bit integer values. - /// Then, the results are packed into the returned 64-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm4x16: round(clamp(c, 0, +1) * 65535.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint16 packUnorm1x16(float const& v) - /// @see uint32 packUnorm2x16(vec2 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packUnorm4x16(vec4 const& v); - - /// First, unpacks a single 64-bit unsigned integer p into four 16-bit unsigned integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnormx4x16: f / 65535.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. 
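For the 4x16-wide variants the comments pin down the lane order: the first component occupies the 16 least-significant bits. Written as explicit shifts, a sketch of that layout (GLM's own packing.inl, later in this patch, produces the same result on little-endian targets via memcpy of a 16-bit vector):

    #include <cstdint>

    // Four quantized 16-bit lanes packed LSB-first: x in bits 0..15,
    // y in 16..31, z in 32..47, w in 48..63.
    std::uint64_t pack_4x16_lsb_first(std::uint16_t x, std::uint16_t y,
                                      std::uint16_t z, std::uint16_t w)
    {
        return  static_cast<std::uint64_t>(x)
             | (static_cast<std::uint64_t>(y) << 16)
             | (static_cast<std::uint64_t>(z) << 32)
             | (static_cast<std::uint64_t>(w) << 48);
    }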
- /// - /// @see gtc_packing - /// @see float unpackUnorm1x16(uint16 p) - /// @see vec2 unpackUnorm2x16(uint32 p) - /// @see GLSL unpackUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackUnorm4x16(uint64 p); - - /// First, converts the normalized floating-point value v into 16-bit integer value. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion to fixed point is done as follows: - /// packSnorm1x8: round(clamp(s, -1, +1) * 32767.0) - /// - /// @see gtc_packing - /// @see uint32 packSnorm2x16(vec2 const& v) - /// @see uint64 packSnorm4x16(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packSnorm1x16(float v); - - /// First, unpacks a single 16-bit unsigned integer p into a single 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm1x16: clamp(f / 32767.0, -1, +1) - /// - /// @see gtc_packing - /// @see vec2 unpackSnorm2x16(uint32 p) - /// @see vec4 unpackSnorm4x16(uint64 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackSnorm1x16(uint16 p); - - /// First, converts each component of the normalized floating-point value v into 16-bit integer values. - /// Then, the results are packed into the returned 64-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm2x8: round(clamp(c, -1, +1) * 32767.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint16 packSnorm1x16(float const& v) - /// @see uint32 packSnorm2x16(vec2 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packSnorm4x16(vec4 const& v); - - /// First, unpacks a single 64-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm4x16: clamp(f / 32767.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackSnorm1x16(uint16 p) - /// @see vec2 unpackSnorm2x16(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackSnorm4x16(uint64 p); - - /// Returns an unsigned integer obtained by converting the components of a floating-point scalar - /// to the 16-bit floating-point representation found in the OpenGL Specification, - /// and then packing this 16-bit value into a 16-bit unsigned integer. 
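The signed-normalized 16-bit rule above (the line labeled packSnorm1x8 is a copy-paste slip in the original header; the 32767 scale documents packSnorm1x16) and its inverse, as a standalone sketch with hypothetical names:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Encode: round(clamp(v, -1, +1) * 32767.0)
    std::int16_t pack_snorm_1x16(float v)
    {
        return static_cast<std::int16_t>(std::round(std::clamp(v, -1.0f, 1.0f) * 32767.0f));
    }

    // Decode: clamp(f / 32767.0, -1, +1). The clamp matters: the raw
    // value -32768 would otherwise map slightly below -1.
    float unpack_snorm_1x16(std::int16_t p)
    {
        return std::clamp(static_cast<float>(p) / 32767.0f, -1.0f, 1.0f);
    }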
- /// - /// @see gtc_packing - /// @see uint32 packHalf2x16(vec2 const& v) - /// @see uint64 packHalf4x16(vec4 const& v) - /// @see GLSL packHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packHalf1x16(float v); - - /// Returns a floating-point scalar with components obtained by unpacking a 16-bit unsigned integer into a 16-bit value, - /// interpreted as a 16-bit floating-point number according to the OpenGL Specification, - /// and converting it to 32-bit floating-point values. - /// - /// @see gtc_packing - /// @see vec2 unpackHalf2x16(uint32 const& v) - /// @see vec4 unpackHalf4x16(uint64 const& v) - /// @see GLSL unpackHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackHalf1x16(uint16 v); - - /// Returns an unsigned integer obtained by converting the components of a four-component floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification, - /// and then packing these four 16-bit values into a 64-bit unsigned integer. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see uint16 packHalf1x16(float const& v) - /// @see uint32 packHalf2x16(vec2 const& v) - /// @see GLSL packHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packHalf4x16(vec4 const& v); - - /// Returns a four-component floating-point vector with components obtained by unpacking a 64-bit unsigned integer into four 16-bit values, - /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, - /// and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see float unpackHalf1x16(uint16 const& v) - /// @see vec2 unpackHalf2x16(uint32 const& v) - /// @see GLSL unpackHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackHalf4x16(uint64 p); - - /// Returns an unsigned integer obtained by converting the components of a four-component signed integer vector - /// to the 10-10-10-2-bit signed integer representation found in the OpenGL Specification, - /// and then packing these four values into a 32-bit unsigned integer. - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see uint32 packI3x10_1x2(uvec4 const& v) - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see ivec4 unpackI3x10_1x2(uint32 const& p) - GLM_FUNC_DECL uint32 packI3x10_1x2(ivec4 const& v); - - /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. 
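The 10-10-10-2 layout used by the I3x10_1x2 and U3x10_1x2 families puts x in bits 0..9, y in 10..19, z in 20..29 and w in the top two bits. A mask-and-shift sketch of the unsigned case (GLM itself uses bit-field unions in packing.inl below; the signed case additionally needs sign extension of each field on unpack):

    #include <cstdint>

    // x in the 10 least-significant bits, w in the 2 most-significant bits.
    std::uint32_t pack_u3x10_1x2(std::uint32_t x, std::uint32_t y,
                                 std::uint32_t z, std::uint32_t w)
    {
        return  (x & 0x3FFu)
             | ((y & 0x3FFu) << 10)
             | ((z & 0x3FFu) << 20)
             | ((w & 0x3u)   << 30);
    }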
- /// - /// @see gtc_packing - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); - /// @see uvec4 unpackI3x10_1x2(uint32 const& p); - GLM_FUNC_DECL ivec4 unpackI3x10_1x2(uint32 p); - - /// Returns an unsigned integer obtained by converting the components of a four-component unsigned integer vector - /// to the 10-10-10-2-bit unsigned integer representation found in the OpenGL Specification, - /// and then packing these four values into a 32-bit unsigned integer. - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see uint32 packI3x10_1x2(ivec4 const& v) - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see ivec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL uint32 packU3x10_1x2(uvec4 const& v); - - /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); - /// @see uvec4 unpackI3x10_1x2(uint32 const& p); - GLM_FUNC_DECL uvec4 unpackU3x10_1x2(uint32 p); - - /// First, converts the first three components of the normalized floating-point value v into 10-bit signed integer values. - /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm3x10_1x2(xyz): round(clamp(c, -1, +1) * 511.0) - /// packSnorm3x10_1x2(w): round(clamp(c, -1, +1) * 1.0) - /// - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see uint32 packI3x10_1x2(ivec4 const& v) - GLM_FUNC_DECL uint32 packSnorm3x10_1x2(vec4 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm3x10_1x2(xyz): clamp(f / 511.0, -1, +1) - /// unpackSnorm3x10_1x2(w): clamp(f / 511.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)) - /// @see uvec4 unpackI3x10_1x2(uint32 const& p) - /// @see uvec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL vec4 unpackSnorm3x10_1x2(uint32 p); - - /// First, converts the first three components of the normalized floating-point value v into 10-bit unsigned integer values. 
- /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed uninteger values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm3x10_1x2(xyz): round(clamp(c, 0, +1) * 1023.0) - /// packUnorm3x10_1x2(w): round(clamp(c, 0, +1) * 3.0) - /// - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see uint32 packI3x10_1x2(ivec4 const& v) - GLM_FUNC_DECL uint32 packUnorm3x10_1x2(vec4 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm3x10_1x2(xyz): clamp(f / 1023.0, 0, +1) - /// unpackSnorm3x10_1x2(w): clamp(f / 3.0, 0, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see vec4 unpackInorm3x10_1x2(uint32 const& p)) - /// @see uvec4 unpackI3x10_1x2(uint32 const& p) - /// @see uvec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL vec4 unpackUnorm3x10_1x2(uint32 p); - - /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. - /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The first vector component specifies the 11 least-significant bits of the result; - /// the last component specifies the 10 most-significant bits. - /// - /// @see gtc_packing - /// @see vec3 unpackF2x11_1x10(uint32 const& p) - GLM_FUNC_DECL uint32 packF2x11_1x10(vec3 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . - /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packF2x11_1x10(vec3 const& v) - GLM_FUNC_DECL vec3 unpackF2x11_1x10(uint32 p); - - - /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. - /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The first vector component specifies the 11 least-significant bits of the result; - /// the last component specifies the 10 most-significant bits. 
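"Signless" in these comments means the 11- and 10-bit fields are floats with no sign bit: 5 exponent bits plus 6 (or 5) mantissa bits, which is why 11 + 11 + 10 fills the 32-bit word exactly. A sketch of the field split; turning each field back into a full float is what packed11ToFloat and packed10ToFloat do in packing.inl below:

    #include <cstdint>

    // x in bits 0..10, y in bits 11..21, z in bits 22..31.
    void split_f2x11_1x10(std::uint32_t p, std::uint32_t& fx,
                          std::uint32_t& fy, std::uint32_t& fz)
    {
        fx =  p        & 0x7FFu; // 11-bit field: 5-bit exponent, 6-bit mantissa
        fy = (p >> 11) & 0x7FFu;
        fz = (p >> 22) & 0x3FFu; // 10-bit field: 5-bit exponent, 5-bit mantissa
    }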
- /// - /// packF3x9_E1x5 allows encoding into RGBE / RGB9E5 format - /// - /// @see gtc_packing - /// @see vec3 unpackF3x9_E1x5(uint32 const& p) - GLM_FUNC_DECL uint32 packF3x9_E1x5(vec3 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . - /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// unpackF3x9_E1x5 allows decoding RGBE / RGB9E5 data - /// - /// @see gtc_packing - /// @see uint32 packF3x9_E1x5(vec3 const& v) - GLM_FUNC_DECL vec3 unpackF3x9_E1x5(uint32 p); - - /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& p) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb); - - /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see vec<4, T, Q> packRGBM(vec<3, float, Q> const& v) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm); - - /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see vec unpackHalf(vec const& p) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec packHalf(vec const& v); - - /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see vec packHalf(vec const& v) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec unpackHalf(vec const& p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec unpackUnorm(vec const& p); - template - GLM_FUNC_DECL vec packUnorm(vec const& v); - - /// Convert a packed integer to a normalized floating-point vector. 
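RGB9E5 stores three 9-bit mantissas under one shared 5-bit exponent (bias 15, with an implicit mantissa scale of 2^-9). A decode sketch that mirrors the arithmetic of unpackF3x9_E1x5 in packing.inl below, assuming the field order GLM's u9u9u9e5 bit-field union yields on common compilers (x in the low bits):

    #include <cmath>
    #include <cstdint>

    void unpack_rgb9e5(std::uint32_t p, float out[3])
    {
        // Shared exponent in the top 5 bits; value = mantissa * 2^(E - 15 - 9).
        const float scale = std::exp2(static_cast<float>(p >> 27) - 15.0f - 9.0f);
        out[0] = static_cast<float>( p        & 0x1FFu) * scale;
        out[1] = static_cast<float>((p >> 9)  & 0x1FFu) * scale;
        out[2] = static_cast<float>((p >> 18) & 0x1FFu) * scale;
    }

Sharing one exponent is what makes the format attractive for HDR color: no bits are spent on per-channel signs or exponents, so all three channels keep a common dynamic range in 32 bits.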
- /// - /// @see gtc_packing - /// @see vec packUnorm(vec const& v) - template - GLM_FUNC_DECL vec unpackUnorm(vec const& v); - - /// Convert each component of the normalized floating-point vector into signed integer values. - /// - /// @see gtc_packing - /// @see vec unpackSnorm(vec const& p); - template - GLM_FUNC_DECL vec packSnorm(vec const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see vec packSnorm(vec const& v) - template - GLM_FUNC_DECL vec unpackSnorm(vec const& v); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x4(uint8 p) - GLM_FUNC_DECL uint8 packUnorm2x4(vec2 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint8 packUnorm2x4(vec2 const& v) - GLM_FUNC_DECL vec2 unpackUnorm2x4(uint8 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm4x4(uint16 p) - GLM_FUNC_DECL uint16 packUnorm4x4(vec4 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm4x4(vec4 const& v) - GLM_FUNC_DECL vec4 unpackUnorm4x4(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec3 unpackUnorm1x5_1x6_1x5(uint16 p) - GLM_FUNC_DECL uint16 packUnorm1x5_1x6_1x5(vec3 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm1x5_1x6_1x5(vec3 const& v) - GLM_FUNC_DECL vec3 unpackUnorm1x5_1x6_1x5(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm3x5_1x1(uint16 p) - GLM_FUNC_DECL uint16 packUnorm3x5_1x1(vec4 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm3x5_1x1(vec4 const& v) - GLM_FUNC_DECL vec4 unpackUnorm3x5_1x1(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec3 unpackUnorm2x3_1x2(uint8 p) - GLM_FUNC_DECL uint8 packUnorm2x3_1x2(vec3 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint8 packUnorm2x3_1x2(vec3 const& v) - GLM_FUNC_DECL vec3 unpackUnorm2x3_1x2(uint8 p); - - - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i8vec2 unpackInt2x8(int16 p) - GLM_FUNC_DECL int16 packInt2x8(i8vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int16 packInt2x8(i8vec2 const& v) - GLM_FUNC_DECL i8vec2 unpackInt2x8(int16 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u8vec2 unpackInt2x8(uint16 p) - GLM_FUNC_DECL uint16 packUint2x8(u8vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint16 packInt2x8(u8vec2 const& v) - GLM_FUNC_DECL u8vec2 unpackUint2x8(uint16 p); - - /// Convert each component from an integer vector into a packed integer. 
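packUnorm1x5_1x6_1x5 is the classic RGB565 encoding: per-channel scales 31, 63 and 31, first channel in the low bits (the layout GLM's u5u6u5 union produces on common ABIs). A hypothetical sketch:

    #include <cstdint>

    std::uint16_t pack_rgb565(float r, float g, float b)
    {
        auto q = [](float v, float scale) {
            v = v < 0.0f ? 0.0f : (v > 1.0f ? 1.0f : v);
            return static_cast<std::uint32_t>(v * scale + 0.5f); // round
        };
        return static_cast<std::uint16_t>(
            q(r, 31.0f) | (q(g, 63.0f) << 5) | (q(b, 31.0f) << 11));
    }

Green gets the extra bit because the eye is most sensitive to luminance changes in green.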
- /// - /// @see gtc_packing - /// @see i8vec4 unpackInt4x8(int32 p) - GLM_FUNC_DECL int32 packInt4x8(i8vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int32 packInt2x8(i8vec4 const& v) - GLM_FUNC_DECL i8vec4 unpackInt4x8(int32 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u8vec4 unpackUint4x8(uint32 p) - GLM_FUNC_DECL uint32 packUint4x8(u8vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint32 packUint4x8(u8vec2 const& v) - GLM_FUNC_DECL u8vec4 unpackUint4x8(uint32 p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i16vec2 unpackInt2x16(int p) - GLM_FUNC_DECL int packInt2x16(i16vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int packInt2x16(i16vec2 const& v) - GLM_FUNC_DECL i16vec2 unpackInt2x16(int p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i16vec4 unpackInt4x16(int64 p) - GLM_FUNC_DECL int64 packInt4x16(i16vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int64 packInt4x16(i16vec4 const& v) - GLM_FUNC_DECL i16vec4 unpackInt4x16(int64 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u16vec2 unpackUint2x16(uint p) - GLM_FUNC_DECL uint packUint2x16(u16vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint packUint2x16(u16vec2 const& v) - GLM_FUNC_DECL u16vec2 unpackUint2x16(uint p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u16vec4 unpackUint4x16(uint64 p) - GLM_FUNC_DECL uint64 packUint4x16(u16vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint64 packUint4x16(u16vec4 const& v) - GLM_FUNC_DECL u16vec4 unpackUint4x16(uint64 p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i32vec2 unpackInt2x32(int p) - GLM_FUNC_DECL int64 packInt2x32(i32vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int packInt2x16(i32vec2 const& v) - GLM_FUNC_DECL i32vec2 unpackInt2x32(int64 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u32vec2 unpackUint2x32(int p) - GLM_FUNC_DECL uint64 packUint2x32(u32vec2 const& v); - - /// Convert a packed integer into an integer vector. 
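All of these packInt*/packUint* functions are pure bit-for-bit reinterpretations, and GLM's packing.inl below implements them with memcpy. That choice is deliberate: memcpy between objects of the same size is well defined in C++, whereas casting pointers between unrelated integer types would violate strict aliasing. A standalone sketch of the 4x16 case:

    #include <cstdint>
    #include <cstring>

    // Reinterpret four 16-bit ints as one 64-bit int. Which component lands
    // in the low bits follows the target's byte order, so the packed value
    // is endian-dependent.
    std::int64_t pack_int_4x16(const std::int16_t v[4])
    {
        std::int64_t pack = 0;
        std::memcpy(&pack, v, sizeof pack); // 8 bytes = 4 * sizeof(int16_t)
        return pack;
    }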
- /// - /// @see gtc_packing - /// @see int packUint2x16(u32vec2 const& v) - GLM_FUNC_DECL u32vec2 unpackUint2x32(uint64 p); - - - /// @} -}// namespace glm - -#include "packing.inl" diff --git a/third_party/glm/gtc/packing.inl b/third_party/glm/gtc/packing.inl deleted file mode 100755 index 8c906e1..0000000 --- a/third_party/glm/gtc/packing.inl +++ /dev/null @@ -1,938 +0,0 @@ -/// @ref gtc_packing - -#include "../ext/scalar_relational.hpp" -#include "../ext/vector_relational.hpp" -#include "../common.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../detail/type_half.hpp" -#include -#include - -namespace glm{ -namespace detail -{ - GLM_FUNC_QUALIFIER glm::uint16 float2half(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((f >> 16) & 0x8000) | // sign - ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | // exponential - ((f >> 13) & 0x03ff); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 float2packed11(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x000007c0 => 00000000 00000000 00000111 11000000 - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((f & 0x7f800000) - 0x38000000) >> 17) & 0x07c0) | // exponential - ((f >> 17) & 0x003f); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 packed11ToFloat(glm::uint32 p) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x000007c0 => 00000000 00000000 00000111 11000000 - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((p & 0x07c0) << 17) + 0x38000000) & 0x7f800000) | // exponential - ((p & 0x003f) << 17); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 float2packed10(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x0000001F => 00000000 00000000 00000000 00011111 - // 0x0000003F => 00000000 00000000 00000000 00111111 - // 0x000003E0 => 00000000 00000000 00000011 11100000 - // 0x000007C0 => 00000000 00000000 00000111 11000000 - // 0x00007C00 => 00000000 00000000 01111100 00000000 - // 0x000003FF => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((f & 0x7f800000) - 0x38000000) >> 18) & 0x03E0) | // exponential - ((f >> 18) & 0x001f); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 packed10ToFloat(glm::uint32 p) 
- { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x0000001F => 00000000 00000000 00000000 00011111 - // 0x0000003F => 00000000 00000000 00000000 00111111 - // 0x000003E0 => 00000000 00000000 00000011 11100000 - // 0x000007C0 => 00000000 00000000 00000111 11000000 - // 0x00007C00 => 00000000 00000000 01111100 00000000 - // 0x000003FF => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((p & 0x03E0) << 18) + 0x38000000) & 0x7f800000) | // exponential - ((p & 0x001f) << 18); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint half2float(glm::uint h) - { - return ((h & 0x8000) << 16) | ((( h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13); - } - - GLM_FUNC_QUALIFIER glm::uint floatTo11bit(float x) - { - if(x == 0.0f) - return 0u; - else if(glm::isnan(x)) - return ~0u; - else if(glm::isinf(x)) - return 0x1Fu << 6u; - - uint Pack = 0u; - memcpy(&Pack, &x, sizeof(Pack)); - return float2packed11(Pack); - } - - GLM_FUNC_QUALIFIER float packed11bitToFloat(glm::uint x) - { - if(x == 0) - return 0.0f; - else if(x == ((1 << 11) - 1)) - return ~0;//NaN - else if(x == (0x1f << 6)) - return ~0;//Inf - - uint Result = packed11ToFloat(x); - - float Temp = 0; - memcpy(&Temp, &Result, sizeof(Temp)); - return Temp; - } - - GLM_FUNC_QUALIFIER glm::uint floatTo10bit(float x) - { - if(x == 0.0f) - return 0u; - else if(glm::isnan(x)) - return ~0u; - else if(glm::isinf(x)) - return 0x1Fu << 5u; - - uint Pack = 0; - memcpy(&Pack, &x, sizeof(Pack)); - return float2packed10(Pack); - } - - GLM_FUNC_QUALIFIER float packed10bitToFloat(glm::uint x) - { - if(x == 0) - return 0.0f; - else if(x == ((1 << 10) - 1)) - return ~0;//NaN - else if(x == (0x1f << 5)) - return ~0;//Inf - - uint Result = packed10ToFloat(x); - - float Temp = 0; - memcpy(&Temp, &Result, sizeof(Temp)); - return Temp; - } - -// GLM_FUNC_QUALIFIER glm::uint f11_f11_f10(float x, float y, float z) -// { -// return ((floatTo11bit(x) & ((1 << 11) - 1)) << 0) | ((floatTo11bit(y) & ((1 << 11) - 1)) << 11) | ((floatTo10bit(z) & ((1 << 10) - 1)) << 22); -// } - - union u3u3u2 - { - struct - { - uint x : 3; - uint y : 3; - uint z : 2; - } data; - uint8 pack; - }; - - union u4u4 - { - struct - { - uint x : 4; - uint y : 4; - } data; - uint8 pack; - }; - - union u4u4u4u4 - { - struct - { - uint x : 4; - uint y : 4; - uint z : 4; - uint w : 4; - } data; - uint16 pack; - }; - - union u5u6u5 - { - struct - { - uint x : 5; - uint y : 6; - uint z : 5; - } data; - uint16 pack; - }; - - union u5u5u5u1 - { - struct - { - uint x : 5; - uint y : 5; - uint z : 5; - uint w : 1; - } data; - uint16 pack; - }; - - union u10u10u10u2 - { - struct - { - uint x : 10; - uint y : 10; - uint z : 10; - uint w : 2; - } data; - uint32 pack; - }; - - union i10i10i10i2 - { - struct - { - int x : 10; - int y : 10; - int z : 10; - int w : 2; - } data; - uint32 pack; - }; - - union u9u9u9e5 - { - struct - { - uint x : 9; - uint y : 9; - uint z : 9; - uint w : 5; - } data; - uint32 pack; - }; - - template - struct compute_half - {}; - - template - struct compute_half<1, Q> - { - GLM_FUNC_QUALIFIER static vec<1, uint16, Q> pack(vec<1, float, Q> const& v) - { - int16 const Unpack(detail::toFloat16(v.x)); - u16vec1 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<1, float, 
Q> unpack(vec<1, uint16, Q> const& v) - { - i16vec1 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<1, float, Q>(detail::toFloat32(v.x)); - } - }; - - template - struct compute_half<2, Q> - { - GLM_FUNC_QUALIFIER static vec<2, uint16, Q> pack(vec<2, float, Q> const& v) - { - vec<2, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y)); - u16vec2 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<2, float, Q> unpack(vec<2, uint16, Q> const& v) - { - i16vec2 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<2, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y)); - } - }; - - template - struct compute_half<3, Q> - { - GLM_FUNC_QUALIFIER static vec<3, uint16, Q> pack(vec<3, float, Q> const& v) - { - vec<3, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z)); - u16vec3 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<3, float, Q> unpack(vec<3, uint16, Q> const& v) - { - i16vec3 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<3, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z)); - } - }; - - template - struct compute_half<4, Q> - { - GLM_FUNC_QUALIFIER static vec<4, uint16, Q> pack(vec<4, float, Q> const& v) - { - vec<4, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z), detail::toFloat16(v.w)); - u16vec4 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<4, float, Q> unpack(vec<4, uint16, Q> const& v) - { - i16vec4 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<4, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z), detail::toFloat32(v.w)); - } - }; -}//namespace detail - - GLM_FUNC_QUALIFIER uint8 packUnorm1x8(float v) - { - return static_cast(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - } - - GLM_FUNC_QUALIFIER float unpackUnorm1x8(uint8 p) - { - float const Unpack(p); - return Unpack * static_cast(0.0039215686274509803921568627451); // 1 / 255 - } - - GLM_FUNC_QUALIFIER uint16 packUnorm2x8(vec2 const& v) - { - u8vec2 const Topack(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - - uint16 Unpack = 0; - memcpy(&Unpack, &Topack, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x8(uint16 p) - { - u8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return vec2(Unpack) * float(0.0039215686274509803921568627451); // 1 / 255 - } - - GLM_FUNC_QUALIFIER uint8 packSnorm1x8(float v) - { - int8 const Topack(static_cast(round(clamp(v ,-1.0f, 1.0f) * 127.0f))); - uint8 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackSnorm1x8(uint8 p) - { - int8 Unpack = 0; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - static_cast(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packSnorm2x8(vec2 const& v) - { - i8vec2 const Topack(round(clamp(v, -1.0f, 1.0f) * 127.0f)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec2 unpackSnorm2x8(uint16 p) - { - i8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - vec2(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packUnorm1x16(float s) - { - return static_cast(round(clamp(s, 0.0f, 1.0f) * 
65535.0f)); - } - - GLM_FUNC_QUALIFIER float unpackUnorm1x16(uint16 p) - { - float const Unpack(p); - return Unpack * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 - } - - GLM_FUNC_QUALIFIER uint64 packUnorm4x16(vec4 const& v) - { - u16vec4 const Topack(round(clamp(v , 0.0f, 1.0f) * 65535.0f)); - uint64 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm4x16(uint64 p) - { - u16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return vec4(Unpack) * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 - } - - GLM_FUNC_QUALIFIER uint16 packSnorm1x16(float v) - { - int16 const Topack = static_cast(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackSnorm1x16(uint16 p) - { - int16 Unpack = 0; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - static_cast(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint64 packSnorm4x16(vec4 const& v) - { - i16vec4 const Topack(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); - uint64 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec4 unpackSnorm4x16(uint64 p) - { - i16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - vec4(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packHalf1x16(float v) - { - int16 const Topack(detail::toFloat16(v)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackHalf1x16(uint16 v) - { - int16 Unpack = 0; - memcpy(&Unpack, &v, sizeof(Unpack)); - return detail::toFloat32(Unpack); - } - - GLM_FUNC_QUALIFIER uint64 packHalf4x16(glm::vec4 const& v) - { - i16vec4 const Unpack( - detail::toFloat16(v.x), - detail::toFloat16(v.y), - detail::toFloat16(v.z), - detail::toFloat16(v.w)); - uint64 Packed = 0; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER glm::vec4 unpackHalf4x16(uint64 v) - { - i16vec4 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec4( - detail::toFloat32(Unpack.x), - detail::toFloat32(Unpack.y), - detail::toFloat32(Unpack.z), - detail::toFloat32(Unpack.w)); - } - - GLM_FUNC_QUALIFIER uint32 packI3x10_1x2(ivec4 const& v) - { - detail::i10i10i10i2 Result; - Result.data.x = v.x; - Result.data.y = v.y; - Result.data.z = v.z; - Result.data.w = v.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER ivec4 unpackI3x10_1x2(uint32 v) - { - detail::i10i10i10i2 Unpack; - Unpack.pack = v; - return ivec4( - Unpack.data.x, - Unpack.data.y, - Unpack.data.z, - Unpack.data.w); - } - - GLM_FUNC_QUALIFIER uint32 packU3x10_1x2(uvec4 const& v) - { - detail::u10u10u10u2 Result; - Result.data.x = v.x; - Result.data.y = v.y; - Result.data.z = v.z; - Result.data.w = v.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER uvec4 unpackU3x10_1x2(uint32 v) - { - detail::u10u10u10u2 Unpack; - Unpack.pack = v; - return uvec4( - Unpack.data.x, - Unpack.data.y, - Unpack.data.z, - Unpack.data.w); - } - - GLM_FUNC_QUALIFIER uint32 packSnorm3x10_1x2(vec4 const& v) - { - ivec4 const Pack(round(clamp(v,-1.0f, 1.0f) * vec4(511.f, 511.f, 511.f, 1.f))); - - detail::i10i10i10i2 Result; - Result.data.x = Pack.x; - Result.data.y = Pack.y; - Result.data.z = Pack.z; - Result.data.w = Pack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 
unpackSnorm3x10_1x2(uint32 v) - { - detail::i10i10i10i2 Unpack; - Unpack.pack = v; - - vec4 const Result(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w); - - return clamp(Result * vec4(1.f / 511.f, 1.f / 511.f, 1.f / 511.f, 1.f), -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint32 packUnorm3x10_1x2(vec4 const& v) - { - uvec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(1023.f, 1023.f, 1023.f, 3.f))); - - detail::u10u10u10u2 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm3x10_1x2(uint32 v) - { - vec4 const ScaleFactors(1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 3.f); - - detail::u10u10u10u2 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactors; - } - - GLM_FUNC_QUALIFIER uint32 packF2x11_1x10(vec3 const& v) - { - return - ((detail::floatTo11bit(v.x) & ((1 << 11) - 1)) << 0) | - ((detail::floatTo11bit(v.y) & ((1 << 11) - 1)) << 11) | - ((detail::floatTo10bit(v.z) & ((1 << 10) - 1)) << 22); - } - - GLM_FUNC_QUALIFIER vec3 unpackF2x11_1x10(uint32 v) - { - return vec3( - detail::packed11bitToFloat(v >> 0), - detail::packed11bitToFloat(v >> 11), - detail::packed10bitToFloat(v >> 22)); - } - - GLM_FUNC_QUALIFIER uint32 packF3x9_E1x5(vec3 const& v) - { - float const SharedExpMax = (pow(2.0f, 9.0f - 1.0f) / pow(2.0f, 9.0f)) * pow(2.0f, 31.f - 15.f); - vec3 const Color = clamp(v, 0.0f, SharedExpMax); - float const MaxColor = max(Color.x, max(Color.y, Color.z)); - - float const ExpSharedP = max(-15.f - 1.f, floor(log2(MaxColor))) + 1.0f + 15.f; - float const MaxShared = floor(MaxColor / pow(2.0f, (ExpSharedP - 15.f - 9.f)) + 0.5f); - float const ExpShared = equal(MaxShared, pow(2.0f, 9.0f), epsilon()) ? 
ExpSharedP + 1.0f : ExpSharedP; - - uvec3 const ColorComp(floor(Color / pow(2.f, (ExpShared - 15.f - 9.f)) + 0.5f)); - - detail::u9u9u9e5 Unpack; - Unpack.data.x = ColorComp.x; - Unpack.data.y = ColorComp.y; - Unpack.data.z = ColorComp.z; - Unpack.data.w = uint(ExpShared); - return Unpack.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackF3x9_E1x5(uint32 v) - { - detail::u9u9u9e5 Unpack; - Unpack.pack = v; - - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * pow(2.0f, Unpack.data.w - 15.f - 9.f); - } - - // Based on Brian Karis http://graphicrants.blogspot.fr/2009/04/rgbm-color-encoding.html - template - GLM_FUNC_QUALIFIER vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb) - { - vec<3, T, Q> const Color(rgb * static_cast(1.0 / 6.0)); - T Alpha = clamp(max(max(Color.x, Color.y), max(Color.z, static_cast(1e-6))), static_cast(0), static_cast(1)); - Alpha = ceil(Alpha * static_cast(255.0)) / static_cast(255.0); - return vec<4, T, Q>(Color / Alpha, Alpha); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm) - { - return vec<3, T, Q>(rgbm.x, rgbm.y, rgbm.z) * rgbm.w * static_cast(6); - } - - template - GLM_FUNC_QUALIFIER vec packHalf(vec const& v) - { - return detail::compute_half::pack(v); - } - - template - GLM_FUNC_QUALIFIER vec unpackHalf(vec const& v) - { - return detail::compute_half::unpack(v); - } - - template - GLM_FUNC_QUALIFIER vec packUnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(round(clamp(v, static_cast(0), static_cast(1)) * static_cast(std::numeric_limits::max()))); - } - - template - GLM_FUNC_QUALIFIER vec unpackUnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())); - } - - template - GLM_FUNC_QUALIFIER vec packSnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(round(clamp(v , static_cast(-1), static_cast(1)) * static_cast(std::numeric_limits::max()))); - } - - template - GLM_FUNC_QUALIFIER vec unpackSnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return clamp(vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())), static_cast(-1), static_cast(1)); - } - - GLM_FUNC_QUALIFIER uint8 packUnorm2x4(vec2 const& v) - { - u32vec2 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); - detail::u4u4 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x4(uint8 v) - { - float const ScaleFactor(1.f / 15.f); - detail::u4u4 Unpack; - Unpack.pack = v; - return vec2(Unpack.data.x, Unpack.data.y) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm4x4(vec4 const& v) - { - u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); - detail::u4u4u4u4 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 
unpackUnorm4x4(uint16 v) - { - float const ScaleFactor(1.f / 15.f); - detail::u4u4u4u4 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm1x5_1x6_1x5(vec3 const& v) - { - u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(31.f, 63.f, 31.f))); - detail::u5u6u5 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackUnorm1x5_1x6_1x5(uint16 v) - { - vec3 const ScaleFactor(1.f / 31.f, 1.f / 63.f, 1.f / 31.f); - detail::u5u6u5 Unpack; - Unpack.pack = v; - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm3x5_1x1(vec4 const& v) - { - u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(31.f, 31.f, 31.f, 1.f))); - detail::u5u5u5u1 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm3x5_1x1(uint16 v) - { - vec4 const ScaleFactor(1.f / 31.f, 1.f / 31.f, 1.f / 31.f, 1.f); - detail::u5u5u5u1 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint8 packUnorm2x3_1x2(vec3 const& v) - { - u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(7.f, 7.f, 3.f))); - detail::u3u3u2 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackUnorm2x3_1x2(uint8 v) - { - vec3 const ScaleFactor(1.f / 7.f, 1.f / 7.f, 1.f / 3.f); - detail::u3u3u2 Unpack; - Unpack.pack = v; - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER int16 packInt2x8(i8vec2 const& v) - { - int16 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i8vec2 unpackInt2x8(int16 p) - { - i8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint16 packUint2x8(u8vec2 const& v) - { - uint16 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u8vec2 unpackUint2x8(uint16 p) - { - u8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int32 packInt4x8(i8vec4 const& v) - { - int32 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i8vec4 unpackInt4x8(int32 p) - { - i8vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint32 packUint4x8(u8vec4 const& v) - { - uint32 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u8vec4 unpackUint4x8(uint32 p) - { - u8vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int packInt2x16(i16vec2 const& v) - { - int Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i16vec2 unpackInt2x16(int p) - { - i16vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int64 packInt4x16(i16vec4 const& v) - { - int64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i16vec4 unpackInt4x16(int64 p) - { - i16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint packUint2x16(u16vec2 const& v) - { - uint Pack = 0; - memcpy(&Pack, &v, 
sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u16vec2 unpackUint2x16(uint p) - { - u16vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint64 packUint4x16(u16vec4 const& v) - { - uint64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u16vec4 unpackUint4x16(uint64 p) - { - u16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int64 packInt2x32(i32vec2 const& v) - { - int64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i32vec2 unpackInt2x32(int64 p) - { - i32vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint64 packUint2x32(u32vec2 const& v) - { - uint64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u32vec2 unpackUint2x32(uint64 p) - { - u32vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } -}//namespace glm - diff --git a/third_party/glm/gtc/quaternion.hpp b/third_party/glm/gtc/quaternion.hpp deleted file mode 100755 index 359e072..0000000 --- a/third_party/glm/gtc/quaternion.hpp +++ /dev/null @@ -1,173 +0,0 @@ -/// @ref gtc_quaternion -/// @file glm/gtc/quaternion.hpp -/// -/// @see core (dependence) -/// @see gtc_constants (dependence) -/// -/// @defgroup gtc_quaternion GLM_GTC_quaternion -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines a templated quaternion type and several quaternion operations. - -#pragma once - -// Dependency: -#include "../gtc/constants.hpp" -#include "../gtc/matrix_transform.hpp" -#include "../ext/vector_relational.hpp" -#include "../ext/quaternion_common.hpp" -#include "../ext/quaternion_float.hpp" -#include "../ext/quaternion_float_precision.hpp" -#include "../ext/quaternion_double.hpp" -#include "../ext/quaternion_double_precision.hpp" -#include "../ext/quaternion_relational.hpp" -#include "../ext/quaternion_geometric.hpp" -#include "../ext/quaternion_trigonometric.hpp" -#include "../ext/quaternion_transform.hpp" -#include "../detail/type_mat3x3.hpp" -#include "../detail/type_mat4x4.hpp" -#include "../detail/type_vec3.hpp" -#include "../detail/type_vec4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_quaternion extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_quaternion - /// @{ - - /// Returns euler angles, pitch as x, yaw as y, roll as z. - /// The result is expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL vec<3, T, Q> eulerAngles(qua const& x); - - /// Returns roll value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T roll(qua const& x); - - /// Returns pitch value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T pitch(qua const& x); - - /// Returns yaw value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T yaw(qua const& x); - - /// Converts a quaternion to a 3 * 3 matrix. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL mat<3, 3, T, Q> mat3_cast(qua const& x); - - /// Converts a quaternion to a 4 * 4 matrix. 
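The Euler-angle convention fixed by these comments (pitch on x, yaw on y, roll on z, all in radians) pairs with concrete formulas in quaternion.inl later in this patch; for example, roll is atan2(2(xy + wz), w^2 + x^2 - y^2 - z^2). A standalone sketch of that one formula, with a hypothetical quaternion struct:

    #include <cmath>

    struct Quat { float w, x, y, z; };

    // Roll: rotation about the z axis, matching the formula in quaternion.inl.
    float roll(const Quat& q)
    {
        return std::atan2(2.0f * (q.x * q.y + q.w * q.z),
                          q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z);
    }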
- /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL mat<4, 4, T, Q> mat4_cast(qua const& x); - - /// Converts a pure rotation 3 * 3 matrix to a quaternion. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL qua quat_cast(mat<3, 3, T, Q> const& x); - - /// Converts a pure rotation 4 * 4 matrix to a quaternion. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL qua quat_cast(mat<4, 4, T, Q> const& x); - - /// Returns the component-wise comparison result of x < y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> lessThan(qua const& x, qua const& y); - - /// Returns the component-wise comparison result of x <= y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y); - - /// Returns the component-wise comparison result of x > y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> greaterThan(qua const& x, qua const& y); - - /// Returns the component-wise comparison result of x >= y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y); - - /// Build a look-at quaternion based on the default handedness. - /// - /// @param direction Desired forward direction. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). - template - GLM_FUNC_DECL qua quatLookAt( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - - /// Build a right-handed look-at quaternion. - /// - /// @param direction Desired forward direction onto which the -z-axis gets mapped. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). - template - GLM_FUNC_DECL qua quatLookAtRH( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - - /// Build a left-handed look-at quaternion. - /// - /// @param direction Desired forward direction onto which the +z-axis gets mapped. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). 
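A typical use of the quatLookAt family documented here, as a hypothetical snippet (eye and target are placeholder names, and the project is assumed to still vendor GLM): orient a camera toward a point. Note the preconditions in the docs: direction must be normalized, and it must not be parallel to up, or the internal cross product degenerates.

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    glm::quat camera_orientation(const glm::vec3& eye, const glm::vec3& target)
    {
        const glm::vec3 dir = glm::normalize(target - eye);
        return glm::quatLookAt(dir, glm::vec3(0.0f, 1.0f, 0.0f)); // world up
    }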
- template - GLM_FUNC_DECL qua quatLookAtLH( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - /// @} -} //namespace glm - -#include "quaternion.inl" diff --git a/third_party/glm/gtc/quaternion.inl b/third_party/glm/gtc/quaternion.inl deleted file mode 100755 index 9dd037e..0000000 --- a/third_party/glm/gtc/quaternion.inl +++ /dev/null @@ -1,200 +0,0 @@ -#include "../trigonometric.hpp" -#include "../geometric.hpp" -#include "../exponential.hpp" -#include "epsilon.hpp" -#include - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> eulerAngles(qua const& x) - { - return vec<3, T, Q>(pitch(x), yaw(x), roll(x)); - } - - template - GLM_FUNC_QUALIFIER T roll(qua const& q) - { - return static_cast(atan(static_cast(2) * (q.x * q.y + q.w * q.z), q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)); - } - - template - GLM_FUNC_QUALIFIER T pitch(qua const& q) - { - //return T(atan(T(2) * (q.y * q.z + q.w * q.x), q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z)); - T const y = static_cast(2) * (q.y * q.z + q.w * q.x); - T const x = q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z; - - if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis - return static_cast(static_cast(2) * atan(q.x, q.w)); - - return static_cast(atan(y, x)); - } - - template - GLM_FUNC_QUALIFIER T yaw(qua const& q) - { - return asin(clamp(static_cast(-2) * (q.x * q.z - q.w * q.y), static_cast(-1), static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat3_cast(qua const& q) - { - mat<3, 3, T, Q> Result(T(1)); - T qxx(q.x * q.x); - T qyy(q.y * q.y); - T qzz(q.z * q.z); - T qxz(q.x * q.z); - T qxy(q.x * q.y); - T qyz(q.y * q.z); - T qwx(q.w * q.x); - T qwy(q.w * q.y); - T qwz(q.w * q.z); - - Result[0][0] = T(1) - T(2) * (qyy + qzz); - Result[0][1] = T(2) * (qxy + qwz); - Result[0][2] = T(2) * (qxz - qwy); - - Result[1][0] = T(2) * (qxy - qwz); - Result[1][1] = T(1) - T(2) * (qxx + qzz); - Result[1][2] = T(2) * (qyz + qwx); - - Result[2][0] = T(2) * (qxz + qwy); - Result[2][1] = T(2) * (qyz - qwx); - Result[2][2] = T(1) - T(2) * (qxx + qyy); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat4_cast(qua const& q) - { - return mat<4, 4, T, Q>(mat3_cast(q)); - } - - template - GLM_FUNC_QUALIFIER qua quat_cast(mat<3, 3, T, Q> const& m) - { - T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2]; - T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2]; - T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1]; - T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2]; - - int biggestIndex = 0; - T fourBiggestSquaredMinus1 = fourWSquaredMinus1; - if(fourXSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourXSquaredMinus1; - biggestIndex = 1; - } - if(fourYSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourYSquaredMinus1; - biggestIndex = 2; - } - if(fourZSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourZSquaredMinus1; - biggestIndex = 3; - } - - T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast(1)) * static_cast(0.5); - T mult = static_cast(0.25) / biggestVal; - - switch(biggestIndex) - { - case 0: - return qua(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * mult); - case 1: - return qua((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + m[1][0]) * mult, (m[2][0] + m[0][2]) * mult); - case 2: - return qua((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult); - case 3: - 
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER qua<T, Q> quat_cast(mat<3, 3, T, Q> const& m)
-    {
-        T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2];
-        T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2];
-        T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1];
-        T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2];
-
-        int biggestIndex = 0;
-        T fourBiggestSquaredMinus1 = fourWSquaredMinus1;
-        if(fourXSquaredMinus1 > fourBiggestSquaredMinus1)
-        {
-            fourBiggestSquaredMinus1 = fourXSquaredMinus1;
-            biggestIndex = 1;
-        }
-        if(fourYSquaredMinus1 > fourBiggestSquaredMinus1)
-        {
-            fourBiggestSquaredMinus1 = fourYSquaredMinus1;
-            biggestIndex = 2;
-        }
-        if(fourZSquaredMinus1 > fourBiggestSquaredMinus1)
-        {
-            fourBiggestSquaredMinus1 = fourZSquaredMinus1;
-            biggestIndex = 3;
-        }
-
-        T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast<T>(1)) * static_cast<T>(0.5);
-        T mult = static_cast<T>(0.25) / biggestVal;
-
-        switch(biggestIndex)
-        {
-        case 0:
-            return qua<T, Q>(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * mult);
-        case 1:
-            return qua<T, Q>((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + m[1][0]) * mult, (m[2][0] + m[0][2]) * mult);
-        case 2:
-            return qua<T, Q>((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult);
-        case 3:
-            return qua<T, Q>((m[0][1] - m[1][0]) * mult, (m[2][0] + m[0][2]) * mult, (m[1][2] + m[2][1]) * mult, biggestVal);
-        default: // Silence a -Wswitch-default warning in GCC. Should never actually get here. Assert is just for sanity.
-            assert(false);
-            return qua<T, Q>(1, 0, 0, 0);
-        }
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER qua<T, Q> quat_cast(mat<4, 4, T, Q> const& m4)
-    {
-        return quat_cast(mat<3, 3, T, Q>(m4));
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThan(qua<T, Q> const& x, qua<T, Q> const& y)
-    {
-        vec<4, bool, Q> Result;
-        for(length_t i = 0; i < x.length(); ++i)
-            Result[i] = x[i] < y[i];
-        return Result;
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThanEqual(qua<T, Q> const& x, qua<T, Q> const& y)
-    {
-        vec<4, bool, Q> Result;
-        for(length_t i = 0; i < x.length(); ++i)
-            Result[i] = x[i] <= y[i];
-        return Result;
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThan(qua<T, Q> const& x, qua<T, Q> const& y)
-    {
-        vec<4, bool, Q> Result;
-        for(length_t i = 0; i < x.length(); ++i)
-            Result[i] = x[i] > y[i];
-        return Result;
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThanEqual(qua<T, Q> const& x, qua<T, Q> const& y)
-    {
-        vec<4, bool, Q> Result;
-        for(length_t i = 0; i < x.length(); ++i)
-            Result[i] = x[i] >= y[i];
-        return Result;
-    }
-
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER qua<T, Q> quatLookAt(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
-    {
-#    if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT
-        return quatLookAtLH(direction, up);
-#    else
-        return quatLookAtRH(direction, up);
-#    endif
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER qua<T, Q> quatLookAtRH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
-    {
-        mat<3, 3, T, Q> Result;
-
-        Result[2] = -direction;
-        Result[0] = normalize(cross(up, Result[2]));
-        Result[1] = cross(Result[2], Result[0]);
-
-        return quat_cast(Result);
-    }
-
-    template<typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER qua<T, Q> quatLookAtLH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up)
-    {
-        mat<3, 3, T, Q> Result;
-
-        Result[2] = direction;
-        Result[0] = normalize(cross(up, Result[2]));
-        Result[1] = cross(Result[2], Result[0]);
-
-        return quat_cast(Result);
-    }
-}//namespace glm
-
-#if GLM_CONFIG_SIMD == GLM_ENABLE
-#    include "quaternion_simd.inl"
-#endif
-
diff --git a/third_party/glm/gtc/quaternion_simd.inl b/third_party/glm/gtc/quaternion_simd.inl
deleted file mode 100755
index e69de29..0000000
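quatLookAtRH/LH above build an orthonormal basis (right, up, forward or back) and hand it to quat_cast. A typical camera-orientation call under the same public API (eye/target are placeholder values):

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Orient a camera at `eye` toward `target`; the direction passed to
    // quatLookAt must be normalized, as the removed doc comments require.
    glm::quat camera_orientation(glm::vec3 const& eye, glm::vec3 const& target)
    {
        glm::vec3 dir = glm::normalize(target - eye);
        return glm::quatLookAt(dir, glm::vec3(0.0f, 1.0f, 0.0f));
    }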
diff --git a/third_party/glm/gtc/random.hpp b/third_party/glm/gtc/random.hpp
deleted file mode 100755
index 9a85958..0000000
--- a/third_party/glm/gtc/random.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/// @ref gtc_random
-/// @file glm/gtc/random.hpp
-///
-/// @see core (dependence)
-/// @see gtx_random (extended)
-///
-/// @defgroup gtc_random GLM_GTC_random
-/// @ingroup gtc
-///
-/// Include <glm/gtc/random.hpp> to use the features of this extension.
-///
-/// Generate random numbers from various distribution methods.
-
-#pragma once
-
-// Dependency:
-#include "../ext/scalar_int_sized.hpp"
-#include "../ext/scalar_uint_sized.hpp"
-#include "../detail/qualifier.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#    pragma message("GLM: GLM_GTC_random extension included")
-#endif
-
-namespace glm
-{
-    /// @addtogroup gtc_random
-    /// @{
-
-    /// Generate random numbers in the interval [Min, Max], according to a linear distribution
-    ///
-    /// @param Min Minimum value included in the sampling
-    /// @param Max Maximum value included in the sampling
-    /// @tparam genType Value type. Currently supported: float or double scalars.
-    /// @see gtc_random
-    template<typename genType>
-    GLM_FUNC_DECL genType linearRand(genType Min, genType Max);
-
-    /// Generate random numbers in the interval [Min, Max], according to a linear distribution
-    ///
-    /// @param Min Minimum value included in the sampling
-    /// @param Max Maximum value included in the sampling
-    /// @tparam T Value type. Currently supported: float or double.
-    ///
-    /// @see gtc_random
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> linearRand(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
-
-    /// Generate random numbers in the interval [Min, Max], according to a gaussian distribution
-    ///
-    /// @see gtc_random
-    template<typename genType>
-    GLM_FUNC_DECL genType gaussRand(genType Mean, genType Deviation);
-
-    /// Generate a random 2D vector whose coordinates are regularly distributed on a circle of a given radius
-    ///
-    /// @see gtc_random
-    template<typename T>
-    GLM_FUNC_DECL vec<2, T, defaultp> circularRand(T Radius);
-
-    /// Generate a random 3D vector whose coordinates are regularly distributed on a sphere of a given radius
-    ///
-    /// @see gtc_random
-    template<typename T>
-    GLM_FUNC_DECL vec<3, T, defaultp> sphericalRand(T Radius);
-
-    /// Generate a random 2D vector whose coordinates are regularly distributed within the area of a disk of a given radius
-    ///
-    /// @see gtc_random
-    template<typename T>
-    GLM_FUNC_DECL vec<2, T, defaultp> diskRand(T Radius);
-
-    /// Generate a random 3D vector whose coordinates are regularly distributed within the volume of a ball of a given radius
-    ///
-    /// @see gtc_random
-    template<typename T>
-    GLM_FUNC_DECL vec<3, T, defaultp> ballRand(T Radius);
-
-    /// @}
-}//namespace glm
-
-#include "random.inl"
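For context on the interface being removed, a short sketch of typical calls (the ranges are arbitrary):

    #include <glm/glm.hpp>
    #include <glm/gtc/random.hpp>

    void random_usage()
    {
        float u     = glm::linearRand(0.0f, 1.0f);                        // uniform scalar in [0, 1]
        glm::vec3 p = glm::linearRand(glm::vec3(-1.0f), glm::vec3(1.0f)); // uniform point in a box
        float g     = glm::gaussRand(0.0f, 1.0f);                         // gaussian sample around 0
        (void)u; (void)p; (void)g;
    }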
diff --git a/third_party/glm/gtc/random.inl b/third_party/glm/gtc/random.inl
deleted file mode 100755
index 7048509..0000000
--- a/third_party/glm/gtc/random.inl
+++ /dev/null
@@ -1,303 +0,0 @@
-#include "../geometric.hpp"
-#include "../exponential.hpp"
-#include "../trigonometric.hpp"
-#include "../detail/type_vec1.hpp"
-#include <cstdlib>
-#include <ctime>
-#include <cassert>
-#include <cmath>
-
-namespace glm{
-namespace detail
-{
-    template <length_t L, typename T, qualifier Q>
-    struct compute_rand
-    {
-        GLM_FUNC_QUALIFIER static vec<L, T, Q> call();
-    };
-
-    template <qualifier P>
-    struct compute_rand<1, uint8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<1, uint8, P> call()
-        {
-            return vec<1, uint8, P>(
-                std::rand() % std::numeric_limits<uint8>::max());
-        }
-    };
-
-    template <qualifier P>
-    struct compute_rand<2, uint8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<2, uint8, P> call()
-        {
-            return vec<2, uint8, P>(
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max());
-        }
-    };
-
-    template <qualifier P>
-    struct compute_rand<3, uint8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<3, uint8, P> call()
-        {
-            return vec<3, uint8, P>(
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max());
-        }
-    };
-
-    template <qualifier P>
-    struct compute_rand<4, uint8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<4, uint8, P> call()
-        {
-            return vec<4, uint8, P>(
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max(),
-                std::rand() % std::numeric_limits<uint8>::max());
-        }
-    };
-
-    template <length_t L, qualifier P>
-    struct compute_rand<L, uint16, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint16, P> call()
-        {
-            return
-                (vec<L, uint16, P>(compute_rand<L, uint8, P>::call()) << static_cast<uint16>(8)) |
-                (vec<L, uint16, P>(compute_rand<L, uint8, P>::call()) << static_cast<uint16>(0));
-        }
-    };
-
-    template <length_t L, qualifier P>
-    struct compute_rand<L, uint32, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint32, P> call()
-        {
-            return
-                (vec<L, uint32, P>(compute_rand<L, uint16, P>::call()) << static_cast<uint32>(16)) |
-                (vec<L, uint32, P>(compute_rand<L, uint16, P>::call()) << static_cast<uint32>(0));
-        }
-    };
-
-    template <length_t L, qualifier P>
-    struct compute_rand<L, uint64, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint64, P> call()
-        {
-            return
-                (vec<L, uint64, P>(compute_rand<L, uint32, P>::call()) << static_cast<uint64>(32)) |
-                (vec<L, uint64, P>(compute_rand<L, uint32, P>::call()) << static_cast<uint64>(0));
-        }
-    };
-
-    template<length_t L, typename T, qualifier Q>
-    struct compute_linearRand
-    {
-        GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, int8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, int8, P> call(vec<L, int8, P> const& Min, vec<L, int8, P> const& Max)
-        {
-            return (vec<L, int8, P>(compute_rand<L, uint8, P>::call() % vec<L, uint8, P>(Max + static_cast<int8>(1) - Min))) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, uint8, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint8, P> call(vec<L, uint8, P> const& Min, vec<L, uint8, P> const& Max)
-        {
-            return (compute_rand<L, uint8, P>::call() % (Max + static_cast<uint8>(1) - Min)) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, int16, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, int16, P> call(vec<L, int16, P> const& Min, vec<L, int16, P> const& Max)
-        {
-            return (vec<L, int16, P>(compute_rand<L, uint16, P>::call() % vec<L, uint16, P>(Max + static_cast<int16>(1) - Min))) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, uint16, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint16, P> call(vec<L, uint16, P> const& Min, vec<L, uint16, P> const& Max)
-        {
-            return (compute_rand<L, uint16, P>::call() % (Max + static_cast<uint16>(1) - Min)) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, int32, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, int32, P> call(vec<L, int32, P> const& Min, vec<L, int32, P> const& Max)
-        {
-            return (vec<L, int32, P>(compute_rand<L, uint32, P>::call() % vec<L, uint32, P>(Max + static_cast<int32>(1) - Min))) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, uint32, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint32, P> call(vec<L, uint32, P> const& Min, vec<L, uint32, P> const& Max)
-        {
-            return (compute_rand<L, uint32, P>::call() % (Max + static_cast<uint32>(1) - Min)) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, int64, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, int64, P> call(vec<L, int64, P> const& Min, vec<L, int64, P> const& Max)
-        {
-            return (vec<L, int64, P>(compute_rand<L, uint64, P>::call() % vec<L, uint64, P>(Max + static_cast<int64>(1) - Min))) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, uint64, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, uint64, P> call(vec<L, uint64, P> const& Min, vec<L, uint64, P> const& Max)
-        {
-            return (compute_rand<L, uint64, P>::call() % (Max + static_cast<uint64>(1) - Min)) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, float, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, float, P> call(vec<L, float, P> const& Min, vec<L, float, P> const& Max)
-        {
-            return vec<L, float, P>(compute_rand<L, uint32, P>::call()) / static_cast<float>(std::numeric_limits<uint32>::max()) * (Max - Min) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, double, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, double, P> call(vec<L, double, P> const& Min, vec<L, double, P> const& Max)
-        {
-            return vec<L, double, P>(compute_rand<L, uint64, P>::call()) / static_cast<double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
-        }
-    };
-
-    template<length_t L, qualifier P>
-    struct compute_linearRand<L, long double, P>
-    {
-        GLM_FUNC_QUALIFIER static vec<L, long double, P> call(vec<L, long double, P> const& Min, vec<L, long double, P> const& Max)
-        {
-            return vec<L, long double, P>(compute_rand<L, uint64, P>::call()) / static_cast<long double>(std::numeric_limits<uint64>::max()) * (Max - Min) + Min;
-        }
-    };
-}//namespace detail
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType linearRand(genType Min, genType Max)
-    {
-        return detail::compute_linearRand<1, genType, highp>::call(
-            vec<1, genType, highp>(Min),
-            vec<1, genType, highp>(Max)).x;
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> linearRand(vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
-    {
-        return detail::compute_linearRand<L, T, Q>::call(Min, Max);
-    }
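Worth noting while removing this: compute_rand bottoms out in std::rand(), widening by shifting together independent 8-bit draws, so these distributions follow the C runtime RNG and its std::srand seed. A self-contained sketch of the same widening idea (not GLM code, just the technique):

    #include <cstdint>
    #include <cstdlib>

    // Same widening idea as compute_rand<L, uint16, P> above: two independent
    // 8-bit draws shifted together. This mirrors the upstream `% max()` quirk,
    // under which a byte can never come out as 255.
    std::uint16_t rand16()
    {
        std::uint16_t hi = static_cast<std::uint16_t>(std::rand() % 255);
        std::uint16_t lo = static_cast<std::uint16_t>(std::rand() % 255);
        return static_cast<std::uint16_t>((hi << 8) | lo);
    }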
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType gaussRand(genType Mean, genType Deviation)
-    {
-        genType w, x1, x2;
-
-        do
-        {
-            x1 = linearRand(genType(-1), genType(1));
-            x2 = linearRand(genType(-1), genType(1));
-
-            w = x1 * x1 + x2 * x2;
-        } while(w > genType(1));
-
-        return static_cast<genType>(x2 * Deviation * Deviation * sqrt((genType(-2) * log(w)) / w) + Mean);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> gaussRand(vec<L, T, Q> const& Mean, vec<L, T, Q> const& Deviation)
-    {
-        return detail::functor2<vec, L, T, Q>::call(gaussRand, Mean, Deviation);
-    }
-
-    template<typename T>
-    GLM_FUNC_QUALIFIER vec<2, T, defaultp> diskRand(T Radius)
-    {
-        assert(Radius > static_cast<T>(0));
-
-        vec<2, T, defaultp> Result(T(0));
-        T LenRadius(T(0));
-
-        do
-        {
-            Result = linearRand(
-                vec<2, T, defaultp>(-Radius),
-                vec<2, T, defaultp>(Radius));
-            LenRadius = length(Result);
-        }
-        while(LenRadius > Radius);
-
-        return Result;
-    }
-
-    template<typename T>
-    GLM_FUNC_QUALIFIER vec<3, T, defaultp> ballRand(T Radius)
-    {
-        assert(Radius > static_cast<T>(0));
-
-        vec<3, T, defaultp> Result(T(0));
-        T LenRadius(T(0));
-
-        do
-        {
-            Result = linearRand(
-                vec<3, T, defaultp>(-Radius),
-                vec<3, T, defaultp>(Radius));
-            LenRadius = length(Result);
-        }
-        while(LenRadius > Radius);
-
-        return Result;
-    }
-
-    template<typename T>
-    GLM_FUNC_QUALIFIER vec<2, T, defaultp> circularRand(T Radius)
-    {
-        assert(Radius > static_cast<T>(0));
-
-        T a = linearRand(T(0), static_cast<T>(6.283185307179586476925286766559));
-        return vec<2, T, defaultp>(glm::cos(a), glm::sin(a)) * Radius;
-    }
-
-    template<typename T>
-    GLM_FUNC_QUALIFIER vec<3, T, defaultp> sphericalRand(T Radius)
-    {
-        assert(Radius > static_cast<T>(0));
-
-        T theta = linearRand(T(0), T(6.283185307179586476925286766559f));
-        T phi = std::acos(linearRand(T(-1.0f), T(1.0f)));
-
-        T x = std::sin(phi) * std::cos(theta);
-        T y = std::sin(phi) * std::sin(theta);
-        T z = std::cos(phi);
-
-        return vec<3, T, defaultp>(x, y, z) * Radius;
-    }
-}//namespace glm
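diskRand/ballRand above use rejection sampling (draw uniformly in the bounding box and retry until the point falls inside), while circularRand/sphericalRand sample angles directly. Typical calls, with invented radii:

    #include <glm/glm.hpp>
    #include <glm/gtc/random.hpp>

    void sampling_usage()
    {
        glm::vec2 on_circle = glm::circularRand(1.0f);  // point ON the unit circle
        glm::vec2 in_disk   = glm::diskRand(2.0f);      // point INSIDE a radius-2 disk
        glm::vec3 on_sphere = glm::sphericalRand(1.0f); // point ON the unit sphere
        glm::vec3 in_ball   = glm::ballRand(0.5f);      // point INSIDE a radius-0.5 ball
        (void)on_circle; (void)in_disk; (void)on_sphere; (void)in_ball;
    }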
diff --git a/third_party/glm/gtc/reciprocal.hpp b/third_party/glm/gtc/reciprocal.hpp
deleted file mode 100755
index c7d1330..0000000
--- a/third_party/glm/gtc/reciprocal.hpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/// @ref gtc_reciprocal
-/// @file glm/gtc/reciprocal.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtc_reciprocal GLM_GTC_reciprocal
-/// @ingroup gtc
-///
-/// Include <glm/gtc/reciprocal.hpp> to use the features of this extension.
-///
-/// Define secant, cosecant and cotangent functions.
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#    pragma message("GLM: GLM_GTC_reciprocal extension included")
-#endif
-
-namespace glm
-{
-    /// @addtogroup gtc_reciprocal
-    /// @{
-
-    /// Secant function.
-    /// hypotenuse / adjacent or 1 / cos(x)
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType sec(genType angle);
-
-    /// Cosecant function.
-    /// hypotenuse / opposite or 1 / sin(x)
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType csc(genType angle);
-
-    /// Cotangent function.
-    /// adjacent / opposite or 1 / tan(x)
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType cot(genType angle);
-
-    /// Inverse secant function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType asec(genType x);
-
-    /// Inverse cosecant function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType acsc(genType x);
-
-    /// Inverse cotangent function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType acot(genType x);
-
-    /// Secant hyperbolic function.
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType sech(genType angle);
-
-    /// Cosecant hyperbolic function.
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType csch(genType angle);
-
-    /// Cotangent hyperbolic function.
-    ///
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType coth(genType angle);
-
-    /// Inverse secant hyperbolic function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType asech(genType x);
-
-    /// Inverse cosecant hyperbolic function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType acsch(genType x);
-
-    /// Inverse cotangent hyperbolic function.
-    ///
-    /// @return Return an angle expressed in radians.
-    /// @tparam genType Floating-point scalar or vector types.
-    ///
-    /// @see gtc_reciprocal
-    template<typename genType>
-    GLM_FUNC_DECL genType acoth(genType x);
-
-    /// @}
-}//namespace glm
-
-#include "reciprocal.inl"
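These wrappers reduce to the core trigonometry (sec = 1/cos, cot = tan(pi/2 - x), and so on, as the implementations below show). A small sanity-check sketch against the upstream header; the angle and tolerance are arbitrary:

    #include <cassert>
    #include <cmath>
    #include <glm/gtc/reciprocal.hpp>

    void reciprocal_sanity()
    {
        float a = 0.7f; // arbitrary angle in radians
        assert(std::fabs(glm::sec(a) - 1.0f / std::cos(a)) < 1e-5f); // sec = 1/cos
        assert(std::fabs(glm::cot(a) - 1.0f / std::tan(a)) < 1e-5f); // cot = 1/tan
    }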
diff --git a/third_party/glm/gtc/reciprocal.inl b/third_party/glm/gtc/reciprocal.inl
deleted file mode 100755
index d88729e..0000000
--- a/third_party/glm/gtc/reciprocal.inl
+++ /dev/null
@@ -1,191 +0,0 @@
-/// @ref gtc_reciprocal
-
-#include "../trigonometric.hpp"
-#include <limits>
-
-namespace glm
-{
-    // sec
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType sec(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'sec' only accept floating-point values");
-        return genType(1) / glm::cos(angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> sec(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'sec' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(sec, x);
-    }
-
-    // csc
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType csc(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'csc' only accept floating-point values");
-        return genType(1) / glm::sin(angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> csc(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'csc' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(csc, x);
-    }
-
-    // cot
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType cot(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'cot' only accept floating-point values");
-
-        genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
-        return glm::tan(pi_over_2 - angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> cot(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'cot' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(cot, x);
-    }
-
-    // asec
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType asec(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'asec' only accept floating-point values");
-        return acos(genType(1) / x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> asec(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'asec' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(asec, x);
-    }
-
-    // acsc
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType acsc(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acsc' only accept floating-point values");
-        return asin(genType(1) / x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> acsc(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acsc' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(acsc, x);
-    }
-
-    // acot
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType acot(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acot' only accept floating-point values");
-
-        genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0);
-        return pi_over_2 - atan(x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> acot(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acot' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(acot, x);
-    }
-
-    // sech
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType sech(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'sech' only accept floating-point values");
-        return genType(1) / glm::cosh(angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> sech(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'sech' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(sech, x);
-    }
-
-    // csch
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType csch(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'csch' only accept floating-point values");
-        return genType(1) / glm::sinh(angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> csch(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'csch' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(csch, x);
-    }
-
-    // coth
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType coth(genType angle)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'coth' only accept floating-point values");
-        return glm::cosh(angle) / glm::sinh(angle);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> coth(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'coth' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(coth, x);
-    }
-
-    // asech
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType asech(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'asech' only accept floating-point values");
-        return acosh(genType(1) / x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> asech(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'asech' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(asech, x);
-    }
-
-    // acsch
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType acsch(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acsch' only accept floating-point values");
-        return asinh(genType(1) / x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> acsch(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acsch' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(acsch, x);
-    }
-
-    // acoth
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType acoth(genType x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'acoth' only accept floating-point values");
-        return atanh(genType(1) / x);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> acoth(vec<L, T, Q> const& x)
-    {
-        GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'acoth' only accept floating-point inputs");
-        return detail::functor1<vec, L, T, T, Q>::call(acoth, x);
-    }
-}//namespace glm
diff --git a/third_party/glm/gtc/round.hpp b/third_party/glm/gtc/round.hpp
deleted file mode 100755
index 56edbbc..0000000
--- a/third_party/glm/gtc/round.hpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/// @ref gtc_round
-/// @file glm/gtc/round.hpp
-///
-/// @see core (dependence)
-/// @see gtc_round (dependence)
-///
-/// @defgroup gtc_round GLM_GTC_round
-/// @ingroup gtc
-///
-/// Include <glm/gtc/round.hpp> to use the features of this extension.
-///
-/// Rounding values to specific boundaries
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-#include "../detail/_vectorize.hpp"
-#include "../vector_relational.hpp"
-#include "../common.hpp"
-#include <limits>
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#    pragma message("GLM: GLM_GTC_round extension included")
-#endif
-
-namespace glm
-{
-    /// @addtogroup gtc_round
-    /// @{
-
-    /// Return the power of two number whose value is just higher than the input value,
-    /// rounding up to a power of two.
-    ///
-    /// @see gtc_round
-    template<typename genIUType>
-    GLM_FUNC_DECL genIUType ceilPowerOfTwo(genIUType v);
-
-    /// Return the power of two number whose value is just higher than the input value,
-    /// rounding up to a power of two.
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v);
-
-    /// Return the power of two number whose value is just lower than the input value,
-    /// rounding down to a power of two.
-    ///
-    /// @see gtc_round
-    template<typename genIUType>
-    GLM_FUNC_DECL genIUType floorPowerOfTwo(genIUType v);
-
-    /// Return the power of two number whose value is just lower than the input value,
-    /// rounding down to a power of two.
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v);
-
-    /// Return the power of two number whose value is the closest to the input value.
-    ///
-    /// @see gtc_round
-    template<typename genIUType>
-    GLM_FUNC_DECL genIUType roundPowerOfTwo(genIUType v);
-
-    /// Return the power of two number whose value is the closest to the input value.
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v);
-
-    /// Higher multiple number of Source (Source rounded up to a multiple of Multiple).
-    ///
-    /// @tparam genType Floating-point or integer scalar or vector types.
-    ///
-    /// @param v Source value to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<typename genType>
-    GLM_FUNC_DECL genType ceilMultiple(genType v, genType Multiple);
-
-    /// Higher multiple number of Source (Source rounded up to a multiple of Multiple).
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @param v Source values to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
-
-    /// Lower multiple number of Source (Source rounded down to a multiple of Multiple).
-    ///
-    /// @tparam genType Floating-point or integer scalar or vector types.
-    ///
-    /// @param v Source value to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<typename genType>
-    GLM_FUNC_DECL genType floorMultiple(genType v, genType Multiple);
-
-    /// Lower multiple number of Source (Source rounded down to a multiple of Multiple).
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @param v Source values to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> floorMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
-
-    /// Nearest multiple number of Source (Source rounded to the nearest multiple of Multiple).
-    ///
-    /// @tparam genType Floating-point or integer scalar or vector types.
-    ///
-    /// @param v Source value to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<typename genType>
-    GLM_FUNC_DECL genType roundMultiple(genType v, genType Multiple);
-
-    /// Nearest multiple number of Source (Source rounded to the nearest multiple of Multiple).
-    ///
-    /// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-    /// @tparam T Floating-point or integer scalar types
-    /// @tparam Q Value from qualifier enum
-    ///
-    /// @param v Source values to which is applied the function
-    /// @param Multiple Must be a zero or positive value
-    ///
-    /// @see gtc_round
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_DECL vec<L, T, Q> roundMultiple(vec<L, T, Q> const& v, vec<L, T, Q> const& Multiple);
-
-    /// @}
-} //namespace glm
-
-#include "round.inl"
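Typical use of these rounding helpers, e.g. for texture or buffer sizing; the inputs 260 and 100 are arbitrary:

    #include <glm/gtc/round.hpp>

    void rounding_usage()
    {
        int up      = glm::ceilPowerOfTwo(260);    // 512
        int down    = glm::floorPowerOfTwo(260);   // 256
        int nearest = glm::roundPowerOfTwo(260);   // 256 (closer than 512)
        int aligned = glm::ceilMultiple(100, 16);  // 112
        (void)up; (void)down; (void)nearest; (void)aligned;
    }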
diff --git a/third_party/glm/gtc/round.inl b/third_party/glm/gtc/round.inl
deleted file mode 100755
index 48411e4..0000000
--- a/third_party/glm/gtc/round.inl
+++ /dev/null
@@ -1,155 +0,0 @@
-/// @ref gtc_round
-
-#include "../integer.hpp"
-#include "../ext/vector_integer.hpp"
-
-namespace glm{
-namespace detail
-{
-    template<bool is_float, bool is_signed>
-    struct compute_roundMultiple {};
-
-    template<>
-    struct compute_roundMultiple<true, true>
-    {
-        template<typename genType>
-        GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
-        {
-            if (Source >= genType(0))
-                return Source - std::fmod(Source, Multiple);
-            else
-            {
-                genType Tmp = Source + genType(1);
-                return Tmp - std::fmod(Tmp, Multiple) - Multiple;
-            }
-        }
-    };
-
-    template<>
-    struct compute_roundMultiple<false, false>
-    {
-        template<typename genType>
-        GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
-        {
-            if (Source >= genType(0))
-                return Source - Source % Multiple;
-            else
-            {
-                genType Tmp = Source + genType(1);
-                return Tmp - Tmp % Multiple - Multiple;
-            }
-        }
-    };
-
-    template<>
-    struct compute_roundMultiple<false, true>
-    {
-        template<typename genType>
-        GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple)
-        {
-            if (Source >= genType(0))
-                return Source - Source % Multiple;
-            else
-            {
-                genType Tmp = Source + genType(1);
-                return Tmp - Tmp % Multiple - Multiple;
-            }
-        }
-    };
-}//namespace detail
-
-    //////////////////
-    // ceilPowerOfTwo
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType ceilPowerOfTwo(genType value)
-    {
-        return detail::compute_ceilPowerOfTwo<1, genType, defaultp, std::numeric_limits<genType>::is_signed>::call(vec<1, genType, defaultp>(value)).x;
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> ceilPowerOfTwo(vec<L, T, Q> const& v)
-    {
-        return detail::compute_ceilPowerOfTwo<L, T, Q, std::numeric_limits<T>::is_signed>::call(v);
-    }
-
-    ///////////////////
-    // floorPowerOfTwo
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType floorPowerOfTwo(genType value)
-    {
-        return isPowerOfTwo(value) ? value : static_cast<genType>(1) << findMSB(value);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> floorPowerOfTwo(vec<L, T, Q> const& v)
-    {
-        return detail::functor1<vec, L, T, T, Q>::call(floorPowerOfTwo, v);
-    }
-
-    ///////////////////
-    // roundPowerOfTwo
-
-    template<typename genIUType>
-    GLM_FUNC_QUALIFIER genIUType roundPowerOfTwo(genIUType value)
-    {
-        if(isPowerOfTwo(value))
-            return value;
-
-        genIUType const prev = static_cast<genIUType>(1) << findMSB(value);
-        genIUType const next = prev << static_cast<genIUType>(1);
-        return (next - value) < (value - prev) ? next : prev;
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> roundPowerOfTwo(vec<L, T, Q> const& v)
-    {
-        return detail::functor1<vec, L, T, T, Q>::call(roundPowerOfTwo, v);
-    }
-
-    //////////////////////
-    // ceilMultiple
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType ceilMultiple(genType Source, genType Multiple)
-    {
-        return detail::compute_ceilMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> ceilMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
-    {
-        return detail::functor2<vec, L, T, Q>::call(ceilMultiple, Source, Multiple);
-    }
-
-    //////////////////////
-    // floorMultiple
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType floorMultiple(genType Source, genType Multiple)
-    {
-        return detail::compute_floorMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> floorMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
-    {
-        return detail::functor2<vec, L, T, Q>::call(floorMultiple, Source, Multiple);
-    }
-
-    //////////////////////
-    // roundMultiple
-
-    template<typename genType>
-    GLM_FUNC_QUALIFIER genType roundMultiple(genType Source, genType Multiple)
-    {
-        return detail::compute_roundMultiple<std::numeric_limits<genType>::is_iec559, std::numeric_limits<genType>::is_signed>::call(Source, Multiple);
-    }
-
-    template<length_t L, typename T, qualifier Q>
-    GLM_FUNC_QUALIFIER vec<L, T, Q> roundMultiple(vec<L, T, Q> const& Source, vec<L, T, Q> const& Multiple)
-    {
-        return detail::functor2<vec, L, T, Q>::call(roundMultiple, Source, Multiple);
-    }
-}//namespace glm
diff --git a/third_party/glm/gtc/type_aligned.hpp b/third_party/glm/gtc/type_aligned.hpp
deleted file mode 100755
index 5403abf..0000000
--- a/third_party/glm/gtc/type_aligned.hpp
+++ /dev/null
@@ -1,1315 +0,0 @@
-/// @ref gtc_type_aligned
-/// @file glm/gtc/type_aligned.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtc_type_aligned GLM_GTC_type_aligned
-/// @ingroup gtc
-///
-/// Include <glm/gtc/type_aligned.hpp> to use the features of this extension.
-///
-/// Aligned types allowing SIMD optimizations of vector and matrix types
-
-#pragma once
-
-#if (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE)
-#    error "GLM: Aligned gentypes require to enable C++ language extensions. Define GLM_FORCE_ALIGNED_GENTYPES before including GLM headers to use aligned types."
-#endif
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#    pragma message("GLM: GLM_GTC_type_aligned extension included")
-#endif
-
-#include "../mat4x4.hpp"
-#include "../mat4x3.hpp"
-#include "../mat4x2.hpp"
-#include "../mat3x4.hpp"
-#include "../mat3x3.hpp"
-#include "../mat3x2.hpp"
-#include "../mat2x4.hpp"
-#include "../mat2x3.hpp"
-#include "../mat2x2.hpp"
-#include "../gtc/vec1.hpp"
-#include "../vec2.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-
-namespace glm
-{
-    /// @addtogroup gtc_type_aligned
-    /// @{
-
-    // -- *vec1 --
-
-    /// 1 component vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs.
- typedef vec<1, float, aligned_highp> aligned_highp_vec1; - - /// 1 component vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, aligned_mediump> aligned_mediump_vec1; - - /// 1 component vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, aligned_lowp> aligned_lowp_vec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_highp> aligned_highp_dvec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_mediump> aligned_mediump_dvec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_lowp> aligned_lowp_dvec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_highp> aligned_highp_ivec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_mediump> aligned_mediump_ivec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_lowp> aligned_lowp_ivec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_highp> aligned_highp_uvec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_mediump> aligned_mediump_uvec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_lowp> aligned_lowp_uvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_highp> aligned_highp_bvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_mediump> aligned_mediump_bvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_lowp> aligned_lowp_bvec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, float, packed_highp> packed_highp_vec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, packed_mediump> packed_mediump_vec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, packed_lowp> packed_lowp_vec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, packed_highp> packed_highp_dvec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, packed_mediump> packed_mediump_dvec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, packed_lowp> packed_lowp_dvec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. 
- typedef vec<1, int, packed_highp> packed_highp_ivec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef vec<1, int, packed_mediump> packed_mediump_ivec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef vec<1, int, packed_lowp> packed_lowp_ivec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_highp> packed_highp_uvec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_mediump> packed_mediump_uvec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_lowp> packed_lowp_uvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_highp> packed_highp_bvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_mediump> packed_mediump_bvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_lowp> packed_lowp_bvec1; - - // -- *vec2 -- - - /// 2 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_highp> aligned_highp_vec2; - - /// 2 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_mediump> aligned_mediump_vec2; - - /// 2 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_lowp> aligned_lowp_vec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_highp> aligned_highp_dvec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_mediump> aligned_mediump_dvec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_lowp> aligned_lowp_dvec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_highp> aligned_highp_ivec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_mediump> aligned_mediump_ivec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_lowp> aligned_lowp_ivec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_highp> aligned_highp_uvec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_mediump> aligned_mediump_uvec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_lowp> aligned_lowp_uvec2; - - /// 2 components vector aligned in memory of bool values. - typedef vec<2, bool, aligned_highp> aligned_highp_bvec2; - - /// 2 components vector aligned in memory of bool values. - typedef vec<2, bool, aligned_mediump> aligned_mediump_bvec2; - - /// 2 components vector aligned in memory of bool values. 
- typedef vec<2, bool, aligned_lowp> aligned_lowp_bvec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, float, packed_highp> packed_highp_vec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, float, packed_mediump> packed_mediump_vec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, float, packed_lowp> packed_lowp_vec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, double, packed_highp> packed_highp_dvec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, double, packed_mediump> packed_mediump_dvec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, double, packed_lowp> packed_lowp_dvec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_highp> packed_highp_ivec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_mediump> packed_mediump_ivec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_lowp> packed_lowp_ivec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_highp> packed_highp_uvec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_mediump> packed_mediump_uvec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_lowp> packed_lowp_uvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_highp> packed_highp_bvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_mediump> packed_mediump_bvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_lowp> packed_lowp_bvec2; - - // -- *vec3 -- - - /// 3 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_highp> aligned_highp_vec3; - - /// 3 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_mediump> aligned_mediump_vec3; - - /// 3 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_lowp> aligned_lowp_vec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, double, aligned_highp> aligned_highp_dvec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef vec<3, double, aligned_mediump> aligned_mediump_dvec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, double, aligned_lowp> aligned_lowp_dvec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_highp> aligned_highp_ivec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_mediump> aligned_mediump_ivec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_lowp> aligned_lowp_ivec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_highp> aligned_highp_uvec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_mediump> aligned_mediump_uvec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_lowp> aligned_lowp_uvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_highp> aligned_highp_bvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_mediump> aligned_mediump_bvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_lowp> aligned_lowp_bvec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, float, packed_highp> packed_highp_vec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, float, packed_mediump> packed_mediump_vec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, float, packed_lowp> packed_lowp_vec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, double, packed_highp> packed_highp_dvec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, double, packed_mediump> packed_mediump_dvec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, double, packed_lowp> packed_lowp_dvec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_highp> packed_highp_ivec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_mediump> packed_mediump_ivec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_lowp> packed_lowp_ivec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_highp> packed_highp_uvec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_mediump> packed_mediump_uvec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_lowp> packed_lowp_uvec3; - - /// 3 components vector tightly packed in memory of bool values. 
- typedef vec<3, bool, packed_highp> packed_highp_bvec3; - - /// 3 components vector tightly packed in memory of bool values. - typedef vec<3, bool, packed_mediump> packed_mediump_bvec3; - - /// 3 components vector tightly packed in memory of bool values. - typedef vec<3, bool, packed_lowp> packed_lowp_bvec3; - - // -- *vec4 -- - - /// 4 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_highp> aligned_highp_vec4; - - /// 4 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_mediump> aligned_mediump_vec4; - - /// 4 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_lowp> aligned_lowp_vec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_highp> aligned_highp_dvec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_mediump> aligned_mediump_dvec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_lowp> aligned_lowp_dvec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_highp> aligned_highp_ivec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_mediump> aligned_mediump_ivec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_lowp> aligned_lowp_ivec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_highp> aligned_highp_uvec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_mediump> aligned_mediump_uvec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_lowp> aligned_lowp_uvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_highp> aligned_highp_bvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_mediump> aligned_mediump_bvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_lowp> aligned_lowp_bvec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, float, packed_highp> packed_highp_vec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, float, packed_mediump> packed_mediump_vec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, float, packed_lowp> packed_lowp_vec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef vec<4, double, packed_highp> packed_highp_dvec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, double, packed_mediump> packed_mediump_dvec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, double, packed_lowp> packed_lowp_dvec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_highp> packed_highp_ivec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_mediump> packed_mediump_ivec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_lowp> packed_lowp_ivec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_highp> packed_highp_uvec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_mediump> packed_mediump_uvec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_lowp> packed_lowp_uvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_highp> packed_highp_bvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_mediump> packed_mediump_bvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_lowp> packed_lowp_bvec4; - - // -- *mat2 -- - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_highp> packed_highp_mat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_highp> packed_highp_dmat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2; - - // -- *mat3 -- - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_highp> packed_highp_mat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_highp> packed_highp_dmat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3; - - // -- *mat4 -- - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_highp> packed_highp_mat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_highp> packed_highp_dmat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4; - - // -- *mat2x2 -- - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2x2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2x2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef mat<2, 2, float, packed_highp> packed_highp_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_highp> packed_highp_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2x2; - - // -- *mat2x3 -- - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_highp> aligned_highp_mat2x3; - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_mediump> aligned_mediump_mat2x3; - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_lowp> aligned_lowp_mat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_highp> aligned_highp_dmat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_mediump> aligned_mediump_dmat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_lowp> aligned_lowp_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_highp> packed_highp_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_mediump> packed_mediump_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_lowp> packed_lowp_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, double, packed_highp> packed_highp_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, double, packed_mediump> packed_mediump_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<2, 3, double, packed_lowp> packed_lowp_dmat2x3; - - // -- *mat2x4 -- - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_highp> aligned_highp_mat2x4; - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_mediump> aligned_mediump_mat2x4; - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_lowp> aligned_lowp_mat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_highp> aligned_highp_dmat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_mediump> aligned_mediump_dmat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_lowp> aligned_lowp_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_highp> packed_highp_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_mediump> packed_mediump_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_lowp> packed_lowp_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_highp> packed_highp_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_mediump> packed_mediump_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_lowp> packed_lowp_dmat2x4; - - // -- *mat3x2 -- - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_highp> aligned_highp_mat3x2; - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_mediump> aligned_mediump_mat3x2; - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_lowp> aligned_lowp_mat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, double, aligned_highp> aligned_highp_dmat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<3, 2, double, aligned_mediump> aligned_mediump_dmat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, double, aligned_lowp> aligned_lowp_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_highp> packed_highp_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_mediump> packed_mediump_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_lowp> packed_lowp_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_highp> packed_highp_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_mediump> packed_mediump_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_lowp> packed_lowp_dmat3x2; - - // -- *mat3x3 -- - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3x3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3x3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_highp> packed_highp_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef mat<3, 3, double, packed_highp> packed_highp_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3x3; - - // -- *mat3x4 -- - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_highp> aligned_highp_mat3x4; - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_mediump> aligned_mediump_mat3x4; - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_lowp> aligned_lowp_mat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_highp> aligned_highp_dmat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_mediump> aligned_mediump_dmat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_lowp> aligned_lowp_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_highp> packed_highp_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_mediump> packed_mediump_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_lowp> packed_lowp_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_highp> packed_highp_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_mediump> packed_mediump_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_lowp> packed_lowp_dmat3x4; - - // -- *mat4x2 -- - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, float, aligned_highp> aligned_highp_mat4x2; - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, float, aligned_mediump> aligned_mediump_mat4x2; - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<4, 2, float, aligned_lowp> aligned_lowp_mat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_highp> aligned_highp_dmat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_mediump> aligned_mediump_dmat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_lowp> aligned_lowp_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_highp> packed_highp_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_mediump> packed_mediump_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_lowp> packed_lowp_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_highp> packed_highp_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_mediump> packed_mediump_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_lowp> packed_lowp_dmat4x2; - - // -- *mat4x3 -- - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_highp> aligned_highp_mat4x3; - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_mediump> aligned_mediump_mat4x3; - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_lowp> aligned_lowp_mat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_highp> aligned_highp_dmat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_mediump> aligned_mediump_dmat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_lowp> aligned_lowp_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, float, packed_highp> packed_highp_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<4, 3, float, packed_mediump> packed_mediump_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, float, packed_lowp> packed_lowp_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_highp> packed_highp_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_mediump> packed_mediump_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_lowp> packed_lowp_dmat4x3; - - // -- *mat4x4 -- - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4x4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4x4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_highp> packed_highp_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_highp> packed_highp_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4x4; - - // -- default -- - -#if(defined(GLM_PRECISION_LOWP_FLOAT)) - typedef aligned_lowp_vec1 aligned_vec1; - typedef aligned_lowp_vec2 aligned_vec2; - typedef aligned_lowp_vec3 aligned_vec3; - typedef aligned_lowp_vec4 aligned_vec4; - typedef packed_lowp_vec1 packed_vec1; - typedef packed_lowp_vec2 packed_vec2; - typedef packed_lowp_vec3 packed_vec3; - typedef packed_lowp_vec4 packed_vec4; - - typedef aligned_lowp_mat2 aligned_mat2; - typedef aligned_lowp_mat3 aligned_mat3; - typedef aligned_lowp_mat4 aligned_mat4; - typedef packed_lowp_mat2 packed_mat2; - typedef packed_lowp_mat3 packed_mat3; - typedef packed_lowp_mat4 packed_mat4; - - typedef aligned_lowp_mat2x2 aligned_mat2x2; - typedef aligned_lowp_mat2x3 aligned_mat2x3; - typedef aligned_lowp_mat2x4 aligned_mat2x4; - typedef aligned_lowp_mat3x2 aligned_mat3x2; - typedef aligned_lowp_mat3x3 aligned_mat3x3; - typedef aligned_lowp_mat3x4 aligned_mat3x4; - typedef aligned_lowp_mat4x2 aligned_mat4x2; - typedef aligned_lowp_mat4x3 aligned_mat4x3; - typedef aligned_lowp_mat4x4 aligned_mat4x4; - typedef packed_lowp_mat2x2 packed_mat2x2; - typedef packed_lowp_mat2x3 packed_mat2x3; - typedef packed_lowp_mat2x4 packed_mat2x4; - typedef packed_lowp_mat3x2 packed_mat3x2; - typedef packed_lowp_mat3x3 packed_mat3x3; - typedef packed_lowp_mat3x4 packed_mat3x4; - typedef packed_lowp_mat4x2 packed_mat4x2; - typedef packed_lowp_mat4x3 packed_mat4x3; - typedef packed_lowp_mat4x4 packed_mat4x4; -#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) - typedef aligned_mediump_vec1 aligned_vec1; - typedef aligned_mediump_vec2 aligned_vec2; - typedef aligned_mediump_vec3 aligned_vec3; - typedef aligned_mediump_vec4 aligned_vec4; - typedef packed_mediump_vec1 packed_vec1; - typedef packed_mediump_vec2 packed_vec2; - typedef packed_mediump_vec3 packed_vec3; - typedef packed_mediump_vec4 packed_vec4; - - typedef aligned_mediump_mat2 aligned_mat2; - typedef aligned_mediump_mat3 aligned_mat3; - typedef aligned_mediump_mat4 aligned_mat4; - typedef packed_mediump_mat2 packed_mat2; - typedef packed_mediump_mat3 packed_mat3; - typedef packed_mediump_mat4 packed_mat4; - - typedef aligned_mediump_mat2x2 aligned_mat2x2; - typedef aligned_mediump_mat2x3 aligned_mat2x3; - typedef aligned_mediump_mat2x4 aligned_mat2x4; - typedef aligned_mediump_mat3x2 aligned_mat3x2; - typedef aligned_mediump_mat3x3 aligned_mat3x3; - typedef aligned_mediump_mat3x4 aligned_mat3x4; - typedef aligned_mediump_mat4x2 aligned_mat4x2; - typedef aligned_mediump_mat4x3 aligned_mat4x3; - typedef aligned_mediump_mat4x4 aligned_mat4x4; - typedef packed_mediump_mat2x2 packed_mat2x2; - typedef packed_mediump_mat2x3 packed_mat2x3; - typedef packed_mediump_mat2x4 packed_mat2x4; - typedef packed_mediump_mat3x2 packed_mat3x2; - typedef packed_mediump_mat3x3 packed_mat3x3; - typedef packed_mediump_mat3x4 packed_mat3x4; - typedef packed_mediump_mat4x2 packed_mat4x2; - typedef packed_mediump_mat4x3 packed_mat4x3; - typedef packed_mediump_mat4x4 packed_mat4x4; -#else //defined(GLM_PRECISION_HIGHP_FLOAT) - /// 1 component vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec1 aligned_vec1; - - /// 2 components vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec2 aligned_vec2; - - /// 3 components vector aligned in memory of single-precision floating-point numbers. 
- typedef aligned_highp_vec3 aligned_vec3; - - /// 4 components vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec4 aligned_vec4; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec1 packed_vec1; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec2 packed_vec2; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec3 packed_vec3; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec4 packed_vec4; - - /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2 aligned_mat2; - - /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3 aligned_mat3; - - /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4 aligned_mat4; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2 packed_mat2; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3 packed_mat3; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4 packed_mat4; - - /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x2 aligned_mat2x2; - - /// 2 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x3 aligned_mat2x3; - - /// 2 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x4 aligned_mat2x4; - - /// 3 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x2 aligned_mat3x2; - - /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x3 aligned_mat3x3; - - /// 3 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x4 aligned_mat3x4; - - /// 4 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x2 aligned_mat4x2; - - /// 4 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x3 aligned_mat4x3; - - /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x4 aligned_mat4x4; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x2 packed_mat2x2; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x3 packed_mat2x3; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x4 packed_mat2x4; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3x2 packed_mat3x2; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3x3 packed_mat3x3; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers. 
- typedef packed_highp_mat3x4 packed_mat3x4; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x2 packed_mat4x2; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x3 packed_mat4x3; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x4 packed_mat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef aligned_lowp_dvec1 aligned_dvec1; - typedef aligned_lowp_dvec2 aligned_dvec2; - typedef aligned_lowp_dvec3 aligned_dvec3; - typedef aligned_lowp_dvec4 aligned_dvec4; - typedef packed_lowp_dvec1 packed_dvec1; - typedef packed_lowp_dvec2 packed_dvec2; - typedef packed_lowp_dvec3 packed_dvec3; - typedef packed_lowp_dvec4 packed_dvec4; - - typedef aligned_lowp_dmat2 aligned_dmat2; - typedef aligned_lowp_dmat3 aligned_dmat3; - typedef aligned_lowp_dmat4 aligned_dmat4; - typedef packed_lowp_dmat2 packed_dmat2; - typedef packed_lowp_dmat3 packed_dmat3; - typedef packed_lowp_dmat4 packed_dmat4; - - typedef aligned_lowp_dmat2x2 aligned_dmat2x2; - typedef aligned_lowp_dmat2x3 aligned_dmat2x3; - typedef aligned_lowp_dmat2x4 aligned_dmat2x4; - typedef aligned_lowp_dmat3x2 aligned_dmat3x2; - typedef aligned_lowp_dmat3x3 aligned_dmat3x3; - typedef aligned_lowp_dmat3x4 aligned_dmat3x4; - typedef aligned_lowp_dmat4x2 aligned_dmat4x2; - typedef aligned_lowp_dmat4x3 aligned_dmat4x3; - typedef aligned_lowp_dmat4x4 aligned_dmat4x4; - typedef packed_lowp_dmat2x2 packed_dmat2x2; - typedef packed_lowp_dmat2x3 packed_dmat2x3; - typedef packed_lowp_dmat2x4 packed_dmat2x4; - typedef packed_lowp_dmat3x2 packed_dmat3x2; - typedef packed_lowp_dmat3x3 packed_dmat3x3; - typedef packed_lowp_dmat3x4 packed_dmat3x4; - typedef packed_lowp_dmat4x2 packed_dmat4x2; - typedef packed_lowp_dmat4x3 packed_dmat4x3; - typedef packed_lowp_dmat4x4 packed_dmat4x4; -#elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE)) - typedef aligned_mediump_dvec1 aligned_dvec1; - typedef aligned_mediump_dvec2 aligned_dvec2; - typedef aligned_mediump_dvec3 aligned_dvec3; - typedef aligned_mediump_dvec4 aligned_dvec4; - typedef packed_mediump_dvec1 packed_dvec1; - typedef packed_mediump_dvec2 packed_dvec2; - typedef packed_mediump_dvec3 packed_dvec3; - typedef packed_mediump_dvec4 packed_dvec4; - - typedef aligned_mediump_dmat2 aligned_dmat2; - typedef aligned_mediump_dmat3 aligned_dmat3; - typedef aligned_mediump_dmat4 aligned_dmat4; - typedef packed_mediump_dmat2 packed_dmat2; - typedef packed_mediump_dmat3 packed_dmat3; - typedef packed_mediump_dmat4 packed_dmat4; - - typedef aligned_mediump_dmat2x2 aligned_dmat2x2; - typedef aligned_mediump_dmat2x3 aligned_dmat2x3; - typedef aligned_mediump_dmat2x4 aligned_dmat2x4; - typedef aligned_mediump_dmat3x2 aligned_dmat3x2; - typedef aligned_mediump_dmat3x3 aligned_dmat3x3; - typedef aligned_mediump_dmat3x4 aligned_dmat3x4; - typedef aligned_mediump_dmat4x2 aligned_dmat4x2; - typedef aligned_mediump_dmat4x3 aligned_dmat4x3; - typedef aligned_mediump_dmat4x4 aligned_dmat4x4; - typedef packed_mediump_dmat2x2 packed_dmat2x2; - typedef packed_mediump_dmat2x3 packed_dmat2x3; - typedef packed_mediump_dmat2x4 packed_dmat2x4; - typedef packed_mediump_dmat3x2 packed_dmat3x2; - typedef packed_mediump_dmat3x3 packed_dmat3x3; - typedef packed_mediump_dmat3x4 packed_dmat3x4; - typedef packed_mediump_dmat4x2 packed_dmat4x2; - typedef packed_mediump_dmat4x3 packed_dmat4x3; - typedef packed_mediump_dmat4x4 
packed_dmat4x4; -#else //defined(GLM_PRECISION_HIGHP_DOUBLE) - /// 1 component vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec1 aligned_dvec1; - - /// 2 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec2 aligned_dvec2; - - /// 3 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec3 aligned_dvec3; - - /// 4 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec4 aligned_dvec4; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec1 packed_dvec1; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec2 packed_dvec2; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec3 packed_dvec3; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec4 packed_dvec4; - - /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2 aligned_dmat2; - - /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3 aligned_dmat3; - - /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4 aligned_dmat4; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2 packed_dmat2; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3 packed_dmat3; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4 packed_dmat4; - - /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x2 aligned_dmat2x2; - - /// 2 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x3 aligned_dmat2x3; - - /// 2 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x4 aligned_dmat2x4; - - /// 3 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x2 aligned_dmat3x2; - - /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x3 aligned_dmat3x3; - - /// 3 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x4 aligned_dmat3x4; - - /// 4 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x2 aligned_dmat4x2; - - /// 4 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x3 aligned_dmat4x3; - - /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x4 aligned_dmat4x4; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2x2 packed_dmat2x2; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers. 
- typedef packed_highp_dmat2x3 packed_dmat2x3; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2x4 packed_dmat2x4; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x2 packed_dmat3x2; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x3 packed_dmat3x3; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x4 packed_dmat3x4; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x2 packed_dmat4x2; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x3 packed_dmat4x3; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x4 packed_dmat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_INT)) - typedef aligned_lowp_ivec1 aligned_ivec1; - typedef aligned_lowp_ivec2 aligned_ivec2; - typedef aligned_lowp_ivec3 aligned_ivec3; - typedef aligned_lowp_ivec4 aligned_ivec4; -#elif(defined(GLM_PRECISION_MEDIUMP_INT)) - typedef aligned_mediump_ivec1 aligned_ivec1; - typedef aligned_mediump_ivec2 aligned_ivec2; - typedef aligned_mediump_ivec3 aligned_ivec3; - typedef aligned_mediump_ivec4 aligned_ivec4; -#else //defined(GLM_PRECISION_HIGHP_INT) - /// 1 component vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec1 aligned_ivec1; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec2 aligned_ivec2; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec3 aligned_ivec3; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec4 aligned_ivec4; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec1 packed_ivec1; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec2 packed_ivec2; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec3 packed_ivec3; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec4 packed_ivec4; -#endif//GLM_PRECISION - - // -- Unsigned integer definition -- - -#if(defined(GLM_PRECISION_LOWP_UINT)) - typedef aligned_lowp_uvec1 aligned_uvec1; - typedef aligned_lowp_uvec2 aligned_uvec2; - typedef aligned_lowp_uvec3 aligned_uvec3; - typedef aligned_lowp_uvec4 aligned_uvec4; -#elif(defined(GLM_PRECISION_MEDIUMP_UINT)) - typedef aligned_mediump_uvec1 aligned_uvec1; - typedef aligned_mediump_uvec2 aligned_uvec2; - typedef aligned_mediump_uvec3 aligned_uvec3; - typedef aligned_mediump_uvec4 aligned_uvec4; -#else //defined(GLM_PRECISION_HIGHP_UINT) - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec1 aligned_uvec1; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec2 aligned_uvec2; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec3 aligned_uvec3; - - /// 4 components vector aligned in memory of unsigned integer numbers. 
- typedef aligned_highp_uvec4 aligned_uvec4; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec1 packed_uvec1; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec2 packed_uvec2; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec3 packed_uvec3; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec4 packed_uvec4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_BOOL)) - typedef aligned_lowp_bvec1 aligned_bvec1; - typedef aligned_lowp_bvec2 aligned_bvec2; - typedef aligned_lowp_bvec3 aligned_bvec3; - typedef aligned_lowp_bvec4 aligned_bvec4; -#elif(defined(GLM_PRECISION_MEDIUMP_BOOL)) - typedef aligned_mediump_bvec1 aligned_bvec1; - typedef aligned_mediump_bvec2 aligned_bvec2; - typedef aligned_mediump_bvec3 aligned_bvec3; - typedef aligned_mediump_bvec4 aligned_bvec4; -#else //defined(GLM_PRECISION_HIGHP_BOOL) - /// 1 component vector aligned in memory of bool values. - typedef aligned_highp_bvec1 aligned_bvec1; - - /// 2 components vector aligned in memory of bool values. - typedef aligned_highp_bvec2 aligned_bvec2; - - /// 3 components vector aligned in memory of bool values. - typedef aligned_highp_bvec3 aligned_bvec3; - - /// 4 components vector aligned in memory of bool values. - typedef aligned_highp_bvec4 aligned_bvec4; - - /// 1 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec1 packed_bvec1; - - /// 2 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec2 packed_bvec2; - - /// 3 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec3 packed_bvec3; - - /// 4 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec4 packed_bvec4; -#endif//GLM_PRECISION - - /// @} -}//namespace glm diff --git a/third_party/glm/gtc/type_precision.hpp b/third_party/glm/gtc/type_precision.hpp deleted file mode 100755 index 250bc4f..0000000 --- a/third_party/glm/gtc/type_precision.hpp +++ /dev/null @@ -1,2138 +0,0 @@ -/// @ref gtc_type_precision -/// @file glm/gtc/type_precision.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtc_type_precision GLM_GTC_type_precision -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines specific C++-based qualifier types. - -#pragma once - -// Dependency: -#include "../gtc/quaternion.hpp" -#include "../gtc/vec1.hpp" -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/type_vec2.hpp" -#include "../detail/type_vec3.hpp" -#include "../detail/type_vec4.hpp" -#include "../detail/type_mat2x2.hpp" -#include "../detail/type_mat2x3.hpp" -#include "../detail/type_mat2x4.hpp" -#include "../detail/type_mat3x2.hpp" -#include "../detail/type_mat3x3.hpp" -#include "../detail/type_mat3x4.hpp" -#include "../detail/type_mat4x2.hpp" -#include "../detail/type_mat4x3.hpp" -#include "../detail/type_mat4x4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_type_precision extension included") -#endif - -namespace glm -{ - /////////////////////////// - // Signed int vector types - - /// @addtogroup gtc_type_precision - /// @{ - - /// Low qualifier 8 bit signed integer type. 
- /// @see gtc_type_precision - typedef detail::int8 lowp_int8; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_int16; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_int32; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_int64; - - /// Low qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 lowp_int8_t; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_int16_t; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_int32_t; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_int64_t; - - /// Low qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 lowp_i8; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_i16; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_i32; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_i64; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_int8; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_int16; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_int32; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_int64; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_int8_t; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_int16_t; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_int32_t; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_int64_t; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_i8; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_i16; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_i32; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_i64; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_int8; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_int16; - - /// High qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 highp_int32; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_int64; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_int8_t; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_int16_t; - - /// 32 bit signed integer type. 
- /// @see gtc_type_precision - typedef detail::int32 highp_int32_t; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_int64_t; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_i8; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_i16; - - /// High qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 highp_i32; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_i64; - - -#if GLM_HAS_EXTENDED_INTEGER_TYPE - using std::int8_t; - using std::int16_t; - using std::int32_t; - using std::int64_t; -#else - /// 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 int8_t; - - /// 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 int16_t; - - /// 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 int32_t; - - /// 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 int64_t; -#endif - - /// 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 i8; - - /// 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 i16; - - /// 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 i32; - - /// 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 i64; - - - - /// Low qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, lowp> lowp_i8vec1; - - /// Low qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, lowp> lowp_i8vec2; - - /// Low qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, lowp> lowp_i8vec3; - - /// Low qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, lowp> lowp_i8vec4; - - - /// Medium qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, mediump> mediump_i8vec1; - - /// Medium qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, mediump> mediump_i8vec2; - - /// Medium qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, mediump> mediump_i8vec3; - - /// Medium qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, mediump> mediump_i8vec4; - - - /// High qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, highp> highp_i8vec1; - - /// High qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, highp> highp_i8vec2; - - /// High qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, highp> highp_i8vec3; - - /// High qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, highp> highp_i8vec4; - - - - /// 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, defaultp> i8vec1; - - /// 8 bit signed integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, i8, defaultp> i8vec2; - - /// 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, defaultp> i8vec3; - - /// 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, defaultp> i8vec4; - - - - - - /// Low qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, lowp> lowp_i16vec1; - - /// Low qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, lowp> lowp_i16vec2; - - /// Low qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, lowp> lowp_i16vec3; - - /// Low qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, lowp> lowp_i16vec4; - - - /// Medium qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, mediump> mediump_i16vec1; - - /// Medium qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, mediump> mediump_i16vec2; - - /// Medium qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, mediump> mediump_i16vec3; - - /// Medium qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, mediump> mediump_i16vec4; - - - /// High qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, highp> highp_i16vec1; - - /// High qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, highp> highp_i16vec2; - - /// High qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, highp> highp_i16vec3; - - /// High qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, highp> highp_i16vec4; - - - - - /// 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, defaultp> i16vec1; - - /// 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, defaultp> i16vec2; - - /// 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, defaultp> i16vec3; - - /// 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, defaultp> i16vec4; - - - - /// Low qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, lowp> lowp_i32vec1; - - /// Low qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, lowp> lowp_i32vec2; - - /// Low qualifier 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, lowp> lowp_i32vec3; - - /// Low qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, lowp> lowp_i32vec4; - - - /// Medium qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, mediump> mediump_i32vec1; - - /// Medium qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, mediump> mediump_i32vec2; - - /// Medium qualifier 32 bit signed integer vector of 3 components type. 
- /// @see gtc_type_precision - typedef vec<3, i32, mediump> mediump_i32vec3; - - /// Medium qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, mediump> mediump_i32vec4; - - - /// High qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, highp> highp_i32vec1; - - /// High qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, highp> highp_i32vec2; - - /// High qualifier 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, highp> highp_i32vec3; - - /// High qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, highp> highp_i32vec4; - - - /// 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, defaultp> i32vec1; - - /// 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, defaultp> i32vec2; - - /// 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, defaultp> i32vec3; - - /// 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, defaultp> i32vec4; - - - - - /// Low qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, lowp> lowp_i64vec1; - - /// Low qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, lowp> lowp_i64vec2; - - /// Low qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, lowp> lowp_i64vec3; - - /// Low qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, lowp> lowp_i64vec4; - - - /// Medium qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, mediump> mediump_i64vec1; - - /// Medium qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, mediump> mediump_i64vec2; - - /// Medium qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, mediump> mediump_i64vec3; - - /// Medium qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, mediump> mediump_i64vec4; - - - /// High qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, highp> highp_i64vec1; - - /// High qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, highp> highp_i64vec2; - - /// High qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, highp> highp_i64vec3; - - /// High qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, highp> highp_i64vec4; - - - /// 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, defaultp> i64vec1; - - /// 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, defaultp> i64vec2; - - /// 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, defaultp> i64vec3; - - /// 64 bit signed integer vector of 4 components type. 
- /// @see gtc_type_precision - typedef vec<4, i64, defaultp> i64vec4; - - - ///////////////////////////// - // Unsigned int vector types - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_uint8; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_uint16; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_uint32; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_uint64; - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_uint8_t; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_uint16_t; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_uint32_t; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_uint64_t; - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_u8; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_u16; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_u32; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_u64; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_uint8; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_uint16; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_uint32; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_uint64; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_uint8_t; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_uint16_t; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_uint32_t; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_uint64_t; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_u8; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_u16; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_u32; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_u64; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_uint8; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_uint16; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_uint32; - - /// High qualifier 64 bit unsigned integer type. 
- /// @see gtc_type_precision - typedef detail::uint64 highp_uint64; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_uint8_t; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_uint16_t; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_uint32_t; - - /// High qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 highp_uint64_t; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_u8; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_u16; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_u32; - - /// High qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 highp_u64; - -#if GLM_HAS_EXTENDED_INTEGER_TYPE - using std::uint8_t; - using std::uint16_t; - using std::uint32_t; - using std::uint64_t; -#else - /// Default qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 uint8_t; - - /// Default qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 uint16_t; - - /// Default qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 uint32_t; - - /// Default qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 uint64_t; -#endif - - /// Default qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 u8; - - /// Default qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 u16; - - /// Default qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 u32; - - /// Default qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 u64; - - - - - - ////////////////////// - // Float vector types - - /// Single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float float32; - - /// Double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef double float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. 
- /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_float32; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_float64; - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_float32_t; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_float64_t; - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_f32; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_f64; - - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_float32; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_float64; - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_float32_t; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_float64_t; - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_f32; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_f64; - - -#if(defined(GLM_PRECISION_LOWP_FLOAT)) - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_float32_t float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_float64_t float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_f32 f32; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_f64 f64; - -#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float32 float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float64 float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float32 f32; - - /// Default 64 bit double-qualifier floating-point scalar. 
- /// @see gtc_type_precision - typedef mediump_float64 f64; - -#else//(defined(GLM_PRECISION_HIGHP_FLOAT)) - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float32_t float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float64_t float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float32_t f32; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float64_t f64; -#endif - - - /// Low single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, lowp> lowp_fvec1; - - /// Low single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, lowp> lowp_fvec2; - - /// Low single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, lowp> lowp_fvec3; - - /// Low single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, lowp> lowp_fvec4; - - - /// Medium single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, mediump> mediump_fvec1; - - /// Medium Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, mediump> mediump_fvec2; - - /// Medium Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, mediump> mediump_fvec3; - - /// Medium Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, mediump> mediump_fvec4; - - - /// High single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, highp> highp_fvec1; - - /// High Single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, float, highp> highp_fvec2; - - /// High Single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, float, highp> highp_fvec3; - - /// High Single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, float, highp> highp_fvec4; - - - /// Low single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, lowp> lowp_f32vec1; - - /// Low single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, f32, lowp> lowp_f32vec2; - - /// Low single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, f32, lowp> lowp_f32vec3; - - /// Low single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, f32, lowp> lowp_f32vec4; - - /// Medium single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, mediump> mediump_f32vec1; - - /// Medium single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, f32, mediump> mediump_f32vec2; - - /// Medium single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, f32, mediump> mediump_f32vec3; - - /// Medium single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, f32, mediump> mediump_f32vec4; - - /// High single-qualifier floating-point vector of 1 component. 
- /// @see gtc_type_precision - typedef vec<1, f32, highp> highp_f32vec1; - - /// High single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f32, highp> highp_f32vec2; - - /// High single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f32, highp> highp_f32vec3; - - /// High single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f32, highp> highp_f32vec4; - - - /// Low double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, lowp> lowp_f64vec1; - - /// Low double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, lowp> lowp_f64vec2; - - /// Low double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, lowp> lowp_f64vec3; - - /// Low double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, lowp> lowp_f64vec4; - - /// Medium double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, mediump> mediump_f64vec1; - - /// Medium double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, mediump> mediump_f64vec2; - - /// Medium double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, mediump> mediump_f64vec3; - - /// Medium double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, mediump> mediump_f64vec4; - - /// High double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, highp> highp_f64vec1; - - /// High double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, highp> highp_f64vec2; - - /// High double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, highp> highp_f64vec3; - - /// High double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, highp> highp_f64vec4; - - - - ////////////////////// - // Float matrix types - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_f32 lowp_fmat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, lowp> lowp_fmat2x2; - - /// Low single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, lowp> lowp_fmat2x3; - - /// Low single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, lowp> lowp_fmat2x4; - - /// Low single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, lowp> lowp_fmat3x2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, lowp> lowp_fmat3x3; - - /// Low single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, lowp> lowp_fmat3x4; - - /// Low single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, lowp> lowp_fmat4x2; - - /// Low single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, lowp> lowp_fmat4x3; - - /// Low single-qualifier floating-point 4x4 matrix. 
- /// @see gtc_type_precision - typedef mat<4, 4, f32, lowp> lowp_fmat4x4; - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_fmat1x1 lowp_fmat1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_fmat2x2 lowp_fmat2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_fmat3x3 lowp_fmat3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_fmat4x4 lowp_fmat4; - - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_f32 mediump_fmat1x1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, mediump> mediump_fmat2x2; - - /// Medium single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, mediump> mediump_fmat2x3; - - /// Medium single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, mediump> mediump_fmat2x4; - - /// Medium single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, mediump> mediump_fmat3x2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, mediump> mediump_fmat3x3; - - /// Medium single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, mediump> mediump_fmat3x4; - - /// Medium single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, mediump> mediump_fmat4x2; - - /// Medium single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, mediump> mediump_fmat4x3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, mediump> mediump_fmat4x4; - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_fmat1x1 mediump_fmat1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_fmat2x2 mediump_fmat2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_fmat3x3 mediump_fmat3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_fmat4x4 mediump_fmat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_f32 highp_fmat1x1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, highp> highp_fmat2x2; - - /// High single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, highp> highp_fmat2x3; - - /// High single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, highp> highp_fmat2x4; - - /// High single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, highp> highp_fmat3x2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, highp> highp_fmat3x3; - - /// High single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, highp> highp_fmat3x4; - - /// High single-qualifier floating-point 4x2 matrix. 
- /// @see gtc_type_precision - typedef mat<4, 2, f32, highp> highp_fmat4x2; - - /// High single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, highp> highp_fmat4x3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, highp> highp_fmat4x4; - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_fmat1x1 highp_fmat1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_fmat2x2 highp_fmat2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_fmat3x3 highp_fmat3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_fmat4x4 highp_fmat4; - - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 lowp_f32mat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; - - /// Low single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; - - /// Low single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; - - /// Low single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; - - /// Low single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; - - /// Low single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; - - /// Low single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 lowp_f32mat1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat2x2 lowp_f32mat2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat3x3 lowp_f32mat3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat4x4 lowp_f32mat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 mediump_f32mat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; - - /// Medium single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; - - /// Medium single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; - - /// Medium single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; - - /// Medium single-qualifier floating-point 3x4 matrix. 
- /// @see gtc_type_precision - typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; - - /// Medium single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; - - /// Medium single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat2x2 mediump_f32mat2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat3x3 mediump_f32mat3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat4x4 mediump_f32mat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 highp_f32mat1x1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, highp> highp_f32mat2x2; - - /// High single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, highp> highp_f32mat2x3; - - /// High single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, highp> highp_f32mat2x4; - - /// High single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, highp> highp_f32mat3x2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, highp> highp_f32mat3x3; - - /// High single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, highp> highp_f32mat3x4; - - /// High single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, highp> highp_f32mat4x2; - - /// High single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, highp> highp_f32mat4x3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, highp> highp_f32mat4x4; - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_f32mat2x2 highp_f32mat2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_f32mat3x3 highp_f32mat3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_f32mat4x4 highp_f32mat4; - - - /// Low double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 lowp_f64mat1x1; - - /// Low double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; - - /// Low double-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; - - /// Low double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; - - /// Low double-qualifier floating-point 3x2 matrix. 
- /// @see gtc_type_precision - typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; - - /// Low double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; - - /// Low double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; - - /// Low double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; - - /// Low double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; - - /// Low double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; - - /// Low double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_f64mat1x1 lowp_f64mat1; - - /// Low double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat2x2 lowp_f64mat2; - - /// Low double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat3x3 lowp_f64mat3; - - /// Low double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat4x4 lowp_f64mat4; - - - /// Medium double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 Highp_f64mat1x1; - - /// Medium double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; - - /// Medium double-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; - - /// Medium double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; - - /// Medium double-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; - - /// Medium double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; - - /// Medium double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; - - /// Medium double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; - - /// Medium double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; - - /// Medium double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; - - /// Medium double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_f64mat1x1 mediump_f64mat1; - - /// Medium double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat2x2 mediump_f64mat2; - - /// Medium double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat3x3 mediump_f64mat3; - - /// Medium double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat4x4 mediump_f64mat4; - - /// High double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 highp_f64mat1x1; - - /// High double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, highp> highp_f64mat2x2; - - /// High double-qualifier floating-point 2x3 matrix. 
- /// @see gtc_type_precision - typedef mat<2, 3, f64, highp> highp_f64mat2x3; - - /// High double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, highp> highp_f64mat2x4; - - /// High double-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f64, highp> highp_f64mat3x2; - - /// High double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, highp> highp_f64mat3x3; - - /// High double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, highp> highp_f64mat3x4; - - /// High double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, highp> highp_f64mat4x2; - - /// High double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, highp> highp_f64mat4x3; - - /// High double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, highp> highp_f64mat4x4; - - /// High double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_f64mat1x1 highp_f64mat1; - - /// High double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_f64mat2x2 highp_f64mat2; - - /// High double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_f64mat3x3 highp_f64mat3; - - /// High double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_f64mat4x4 highp_f64mat4; - - - - - /// Low qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, lowp> lowp_u8vec1; - - /// Low qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, lowp> lowp_u8vec2; - - /// Low qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, lowp> lowp_u8vec3; - - /// Low qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, lowp> lowp_u8vec4; - - - /// Medium qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, mediump> mediump_u8vec1; - - /// Medium qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, mediump> mediump_u8vec2; - - /// Medium qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, mediump> mediump_u8vec3; - - /// Medium qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, mediump> mediump_u8vec4; - - - /// High qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, highp> highp_u8vec1; - - /// High qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, highp> highp_u8vec2; - - /// High qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, highp> highp_u8vec3; - - /// High qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, highp> highp_u8vec4; - - - - /// Default qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, defaultp> u8vec1; - - /// Default qualifier 8 bit unsigned integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, u8, defaultp> u8vec2; - - /// Default qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, defaultp> u8vec3; - - /// Default qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, defaultp> u8vec4; - - - - - /// Low qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, lowp> lowp_u16vec1; - - /// Low qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, lowp> lowp_u16vec2; - - /// Low qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, lowp> lowp_u16vec3; - - /// Low qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, lowp> lowp_u16vec4; - - - /// Medium qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, mediump> mediump_u16vec1; - - /// Medium qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, mediump> mediump_u16vec2; - - /// Medium qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, mediump> mediump_u16vec3; - - /// Medium qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, mediump> mediump_u16vec4; - - - /// High qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, highp> highp_u16vec1; - - /// High qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, highp> highp_u16vec2; - - /// High qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, highp> highp_u16vec3; - - /// High qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, highp> highp_u16vec4; - - - - - /// Default qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, defaultp> u16vec1; - - /// Default qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, defaultp> u16vec2; - - /// Default qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, defaultp> u16vec3; - - /// Default qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, defaultp> u16vec4; - - - - /// Low qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, lowp> lowp_u32vec1; - - /// Low qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, lowp> lowp_u32vec2; - - /// Low qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, lowp> lowp_u32vec3; - - /// Low qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, lowp> lowp_u32vec4; - - - /// Medium qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, mediump> mediump_u32vec1; - - /// Medium qualifier 32 bit unsigned integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, u32, mediump> mediump_u32vec2; - - /// Medium qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, mediump> mediump_u32vec3; - - /// Medium qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, mediump> mediump_u32vec4; - - - /// High qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, highp> highp_u32vec1; - - /// High qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, highp> highp_u32vec2; - - /// High qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, highp> highp_u32vec3; - - /// High qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, highp> highp_u32vec4; - - - - /// Default qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, defaultp> u32vec1; - - /// Default qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, defaultp> u32vec2; - - /// Default qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, defaultp> u32vec3; - - /// Default qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, defaultp> u32vec4; - - - - - /// Low qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, lowp> lowp_u64vec1; - - /// Low qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, lowp> lowp_u64vec2; - - /// Low qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, lowp> lowp_u64vec3; - - /// Low qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, lowp> lowp_u64vec4; - - - /// Medium qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, mediump> mediump_u64vec1; - - /// Medium qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, mediump> mediump_u64vec2; - - /// Medium qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, mediump> mediump_u64vec3; - - /// Medium qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, mediump> mediump_u64vec4; - - - /// High qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, highp> highp_u64vec1; - - /// High qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, highp> highp_u64vec2; - - /// High qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, highp> highp_u64vec3; - - /// High qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, highp> highp_u64vec4; - - - - - /// Default qualifier 64 bit unsigned integer scalar type. 
- /// @see gtc_type_precision - typedef vec<1, u64, defaultp> u64vec1; - - /// Default qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, defaultp> u64vec2; - - /// Default qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, defaultp> u64vec3; - - /// Default qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, defaultp> u64vec4; - - - ////////////////////// - // Float vector types - - /// 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 float32_t; - - /// 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 f32; - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 float64_t; - - /// 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 f64; -# endif//GLM_FORCE_SINGLE_ONLY - - /// Single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, defaultp> fvec1; - - /// Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, defaultp> fvec2; - - /// Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, defaultp> fvec3; - - /// Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, defaultp> fvec4; - - - /// Single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, defaultp> f32vec1; - - /// Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f32, defaultp> f32vec2; - - /// Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f32, defaultp> f32vec3; - - /// Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f32, defaultp> f32vec4; - -# ifndef GLM_FORCE_SINGLE_ONLY - /// Double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, defaultp> f64vec1; - - /// Double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, defaultp> f64vec2; - - /// Double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, defaultp> f64vec3; - - /// Double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, defaultp> f64vec4; -# endif//GLM_FORCE_SINGLE_ONLY - - - ////////////////////// - // Float matrix types - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 fmat1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> fmat2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> fmat3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> fmat4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 fmat1x1; - - /// Single-qualifier floating-point 2x2 matrix. 
- /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> fmat2x2; - - /// Single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, defaultp> fmat2x3; - - /// Single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, defaultp> fmat2x4; - - /// Single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, defaultp> fmat3x2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> fmat3x3; - - /// Single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, defaultp> fmat3x4; - - /// Single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, defaultp> fmat4x2; - - /// Single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, defaultp> fmat4x3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> fmat4x4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> f32mat2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> f32mat3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> f32mat4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 f32mat1x1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> f32mat2x2; - - /// Single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, defaultp> f32mat2x3; - - /// Single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, defaultp> f32mat2x4; - - /// Single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, defaultp> f32mat3x2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> f32mat3x3; - - /// Single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, defaultp> f32mat3x4; - - /// Single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, defaultp> f32mat4x2; - - /// Single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, defaultp> f32mat4x3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> f32mat4x4; - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f64mat1; - - /// Double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, defaultp> f64mat2; - - /// Double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, defaultp> f64mat3; - - /// Double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, defaultp> f64mat4; - - - /// Double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 f64mat1x1; - - /// Double-qualifier floating-point 2x2 matrix. 
- /// @see gtc_type_precision
- typedef mat<2, 2, f64, defaultp> f64mat2x2;
-
- /// Double-qualifier floating-point 2x3 matrix.
- /// @see gtc_type_precision
- typedef mat<2, 3, f64, defaultp> f64mat2x3;
-
- /// Double-qualifier floating-point 2x4 matrix.
- /// @see gtc_type_precision
- typedef mat<2, 4, f64, defaultp> f64mat2x4;
-
- /// Double-qualifier floating-point 3x2 matrix.
- /// @see gtc_type_precision
- typedef mat<3, 2, f64, defaultp> f64mat3x2;
-
- /// Double-qualifier floating-point 3x3 matrix.
- /// @see gtc_type_precision
- typedef mat<3, 3, f64, defaultp> f64mat3x3;
-
- /// Double-qualifier floating-point 3x4 matrix.
- /// @see gtc_type_precision
- typedef mat<3, 4, f64, defaultp> f64mat3x4;
-
- /// Double-qualifier floating-point 4x2 matrix.
- /// @see gtc_type_precision
- typedef mat<4, 2, f64, defaultp> f64mat4x2;
-
- /// Double-qualifier floating-point 4x3 matrix.
- /// @see gtc_type_precision
- typedef mat<4, 3, f64, defaultp> f64mat4x3;
-
- /// Double-qualifier floating-point 4x4 matrix.
- /// @see gtc_type_precision
- typedef mat<4, 4, f64, defaultp> f64mat4x4;
-
-# endif//GLM_FORCE_SINGLE_ONLY
-
- //////////////////////////
- // Quaternion types
-
- /// Single-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f32, defaultp> f32quat;
-
- /// Low single-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f32, lowp> lowp_f32quat;
-
- /// Low double-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f64, lowp> lowp_f64quat;
-
- /// Medium single-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f32, mediump> mediump_f32quat;
-
-# ifndef GLM_FORCE_SINGLE_ONLY
-
- /// Medium double-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f64, mediump> mediump_f64quat;
-
- /// High single-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f32, highp> highp_f32quat;
-
- /// High double-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f64, highp> highp_f64quat;
-
- /// Double-qualifier floating-point quaternion.
- /// @see gtc_type_precision
- typedef qua<f64, defaultp> f64quat;
-
-# endif//GLM_FORCE_SINGLE_ONLY
-
- /// @}
-}//namespace glm
-
-#include "type_precision.inl"
diff --git a/third_party/glm/gtc/type_precision.inl b/third_party/glm/gtc/type_precision.inl
deleted file mode 100755
index ae80912..0000000
--- a/third_party/glm/gtc/type_precision.inl
+++ /dev/null
@@ -1,6 +0,0 @@
-/// @ref gtc_precision
-
-namespace glm
-{
-
-}
diff --git a/third_party/glm/gtc/type_ptr.hpp b/third_party/glm/gtc/type_ptr.hpp
deleted file mode 100755
index d7e625a..0000000
--- a/third_party/glm/gtc/type_ptr.hpp
+++ /dev/null
@@ -1,230 +0,0 @@
-/// @ref gtc_type_ptr
-/// @file glm/gtc/type_ptr.hpp
-///
-/// @see core (dependence)
-/// @see gtc_quaternion (dependence)
-///
-/// @defgroup gtc_type_ptr GLM_GTC_type_ptr
-/// @ingroup gtc
-///
-/// Include <glm/gtc/type_ptr.hpp> to use the features of this extension.
-///
-/// Handles the interaction between pointers and vector, matrix types.
-///
-/// This extension defines an overloaded function, glm::value_ptr. It returns
-/// a pointer to the memory layout of the object. Matrix types store their values
-/// in column-major order.
-///
-/// This is useful for uploading data to matrices or copying data to buffer objects.
-///
-/// Example:
-/// @code
-/// #include <glm/glm.hpp>
-/// #include <glm/gtc/type_ptr.hpp>
-///
-/// glm::vec3 aVector(3);
-/// glm::mat4 someMatrix(1.0);
-///
-/// glUniform3fv(uniformLoc, 1, glm::value_ptr(aVector));
-/// glUniformMatrix4fv(uniformMatrixLoc, 1, GL_FALSE, glm::value_ptr(someMatrix));
-/// @endcode
-///
-/// <glm/gtc/type_ptr.hpp> need to be included to use the features of this extension.
-
-#pragma once
-
-// Dependency:
-#include "../gtc/quaternion.hpp"
-#include "../gtc/vec1.hpp"
-#include "../vec2.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-#include "../mat2x2.hpp"
-#include "../mat2x3.hpp"
-#include "../mat2x4.hpp"
-#include "../mat3x2.hpp"
-#include "../mat3x3.hpp"
-#include "../mat3x4.hpp"
-#include "../mat4x2.hpp"
-#include "../mat4x3.hpp"
-#include "../mat4x4.hpp"
-#include <cstring>
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-# pragma message("GLM: GLM_GTC_type_ptr extension included")
-#endif
-
-namespace glm
-{
- /// @addtogroup gtc_type_ptr
- /// @{
-
- /// Return the constant address to the data of the input parameter.
- /// @see gtc_type_ptr
- template<typename genType>
- GLM_FUNC_DECL typename genType::value_type const * value_ptr(genType const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T, qualifier Q>
- GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL vec<2, T, defaultp> make_vec2(T const * const ptr);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL vec<3, T, defaultp> make_vec3(T const * const ptr);
-
- /// Build a vector from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL vec<4, T, defaultp> make_vec4(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2x2(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<2, 3, T, defaultp> make_mat2x3(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<2, 4, T, defaultp> make_mat2x4(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<3, 2, T, defaultp> make_mat3x2(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3x3(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<3, 4, T, defaultp> make_mat3x4(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<4, 2, T, defaultp> make_mat4x2(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<4, 3, T, defaultp> make_mat4x3(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4x4(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3(T const * const ptr);
-
- /// Build a matrix from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4(T const * const ptr);
-
- /// Build a quaternion from a pointer.
- /// @see gtc_type_ptr
- template<typename T>
- GLM_FUNC_DECL qua<T, defaultp> make_quat(T const * const ptr);
-
- /// @}
-}//namespace glm
-
-#include "type_ptr.inl"
diff --git a/third_party/glm/gtc/type_ptr.inl b/third_party/glm/gtc/type_ptr.inl
deleted file mode 100755
index 71df4d3..0000000
--- a/third_party/glm/gtc/type_ptr.inl
+++ /dev/null
@@ -1,386 +0,0 @@
-/// @ref gtc_type_ptr
-
-#include <cstring>
-
-namespace glm
-{
- /// @addtogroup gtc_type_ptr
- /// @{
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(vec<2, T, Q> const& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(vec<2, T, Q>& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const * value_ptr(vec<3, T, Q> const& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(vec<3, T, Q>& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(vec<4, T, Q> const& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(vec<4, T, Q>& v)
- {
- return &(v.x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 2, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 2, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 3, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 3, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 4, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 4, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 3, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 3, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 2, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 2, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 4, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 4, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 2, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 2, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 4, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 4, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 3, T, Q> const& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T * value_ptr(mat<4, 3, T, Q>& m)
- {
- return &(m[0].x);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T const * value_ptr(qua<T, Q> const& q)
- {
- return &(q[0]);
- }
-
- template<typename T, qualifier Q>
- GLM_FUNC_QUALIFIER T* value_ptr(qua<T, Q>& q)
- {
- return &(q[0]);
- }
-
- template<typename T, qualifier Q>
- inline vec<1, T, Q> make_vec1(vec<1, T, Q> const& v)
- {
- return v;
- }
-
- template<typename T, qualifier Q>
- inline vec<1, T, Q> make_vec1(vec<2, T, Q> const& v)
- {
- return vec<1, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<1, T, Q> make_vec1(vec<3, T, Q> const& v)
- {
- return vec<1, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<1, T, Q> make_vec1(vec<4, T, Q> const& v)
- {
- return vec<1, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<2, T, Q> make_vec2(vec<1, T, Q> const& v)
- {
- return vec<2, T, Q>(v.x, static_cast<T>(0));
- }
-
- template<typename T, qualifier Q>
- inline vec<2, T, Q> make_vec2(vec<2, T, Q> const& v)
- {
- return v;
- }
-
- template<typename T, qualifier Q>
- inline vec<2, T, Q> make_vec2(vec<3, T, Q> const& v)
- {
- return vec<2, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<2, T, Q> make_vec2(vec<4, T, Q> const& v)
- {
- return vec<2, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<3, T, Q> make_vec3(vec<1, T, Q> const& v)
- {
- return vec<3, T, Q>(v.x, static_cast<T>(0), static_cast<T>(0));
- }
-
- template<typename T, qualifier Q>
- inline vec<3, T, Q> make_vec3(vec<2, T, Q> const& v)
- {
- return vec<3, T, Q>(v.x, v.y, static_cast<T>(0));
- }
-
- template<typename T, qualifier Q>
- inline vec<3, T, Q> make_vec3(vec<3, T, Q> const& v)
- {
- return v;
- }
-
- template<typename T, qualifier Q>
- inline vec<3, T, Q> make_vec3(vec<4, T, Q> const& v)
- {
- return vec<3, T, Q>(v);
- }
-
- template<typename T, qualifier Q>
- inline vec<4, T, Q> make_vec4(vec<1, T, Q> const& v)
- {
- return vec<4, T, Q>(v.x, static_cast<T>(0), static_cast<T>(0), static_cast<T>(1));
- }
-
- template<typename T, qualifier Q>
- inline vec<4, T, Q> make_vec4(vec<2, T, Q> const& v)
- {
- return vec<4, T, Q>(v.x, v.y, static_cast<T>(0), static_cast<T>(1));
- }
-
- template<typename T, qualifier Q>
- inline vec<4, T, Q> make_vec4(vec<3, T, Q> const& v)
- {
- return vec<4, T, Q>(v.x, v.y, v.z, static_cast<T>(1));
- }
-
- template<typename T, qualifier Q>
- inline vec<4, T, Q> make_vec4(vec<4, T, Q> const& v)
- {
- return v;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER vec<2, T, defaultp> make_vec2(T const *const ptr)
- {
- vec<2, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(vec<2, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER vec<3, T, defaultp> make_vec3(T const *const ptr)
- {
- vec<3, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(vec<3, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER vec<4, T, defaultp> make_vec4(T const *const ptr)
- {
- vec<4, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(vec<4, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2x2(T const *const ptr)
- {
- mat<2, 2, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<2, 2, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<2, 3, T, defaultp> make_mat2x3(T const *const ptr)
- {
- mat<2, 3, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<2, 3, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<2, 4, T, defaultp> make_mat2x4(T const *const ptr)
- {
- mat<2, 4, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<2, 4, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<3, 2, T, defaultp> make_mat3x2(T const *const ptr)
- {
- mat<3, 2, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<3, 2, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3x3(T const *const ptr)
- {
- mat<3, 3, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<3, 3, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<3, 4, T, defaultp> make_mat3x4(T const *const ptr)
- {
- mat<3, 4, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<3, 4, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<4, 2, T, defaultp> make_mat4x2(T const *const ptr)
- {
- mat<4, 2, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<4, 2, T, defaultp>));
- return Result;
- }
-
- template<typename T>
- GLM_FUNC_QUALIFIER mat<4, 3, T, defaultp> make_mat4x3(T const *const ptr)
- {
- mat<4, 3, T, defaultp> Result;
- memcpy(value_ptr(Result), ptr, sizeof(mat<4, 3, T, defaultp>));
- return Result;
- }
-
- template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4x4(T const *const ptr)
-	{
-		mat<4, 4, T, defaultp> Result;
-		memcpy(value_ptr(Result), ptr, sizeof(mat<4, 4, T, defaultp>));
-		return Result;
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2(T const *const ptr)
-	{
-		return make_mat2x2(ptr);
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3(T const *const ptr)
-	{
-		return make_mat3x3(ptr);
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4(T const *const ptr)
-	{
-		return make_mat4x4(ptr);
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER qua<T, defaultp> make_quat(T const *const ptr)
-	{
-		qua<T, defaultp> Result;
-		memcpy(value_ptr(Result), ptr, sizeof(qua<T, defaultp>));
-		return Result;
-	}
-
-	/// @}
-}//namespace glm
-
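For context on the pair of APIs this hunk removes: the make_* functions memcpy a contiguous, column-major scalar array into a GLM type, and value_ptr gives back a raw pointer to the first component. A minimal sketch, not part of the patch, assuming GLM is on the include path (glUniformMatrix4fv is only an example consumer):

#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

// A column-major 4x4 identity with a translation in the fourth column.
float const raw[16] = {
	1.f, 0.f, 0.f, 0.f,
	0.f, 1.f, 0.f, 0.f,
	0.f, 0.f, 1.f, 0.f,
	2.f, 3.f, 4.f, 1.f
};

glm::mat4 m = glm::make_mat4(raw);   // copies 16 floats, column-major
float const* p = glm::value_ptr(m);  // contiguous view of the same 16 floats,
                                     // e.g. for glUniformMatrix4fv(loc, 1, GL_FALSE, p)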
diff --git a/third_party/glm/gtc/ulp.hpp b/third_party/glm/gtc/ulp.hpp
deleted file mode 100755
index 0d80a75..0000000
--- a/third_party/glm/gtc/ulp.hpp
+++ /dev/null
@@ -1,152 +0,0 @@
-/// @ref gtc_ulp
-/// @file glm/gtc/ulp.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtc_ulp GLM_GTC_ulp
-/// @ingroup gtc
-///
-/// Include to use the features of this extension.
-///
-/// Allow the measurement of the accuracy of a function against a reference
-/// implementation. This extension works on floating-point data and provides
-/// results in ULP.
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-#include "../detail/_vectorize.hpp"
-#include "../ext/scalar_int_sized.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_ulp extension included")
-#endif
-
-namespace glm
-{
-	/// Return the next ULP value(s) after the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see gtc_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType next_float(genType x);
-
-	/// Return the previous ULP value(s) before the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see gtc_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType prev_float(genType x);
-
-	/// Return the value(s) ULP distance after the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see gtc_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType next_float(genType x, int ULPs);
-
-	/// Return the value(s) ULP distance before the input value(s).
-	///
-	/// @tparam genType A floating-point scalar type.
-	///
-	/// @see gtc_ulp
-	template<typename genType>
-	GLM_FUNC_DECL genType prev_float(genType x, int ULPs);
-
-	/// Return the distance in the number of ULP between 2 single-precision floating-point scalars.
-	///
-	/// @see gtc_ulp
-	GLM_FUNC_DECL int float_distance(float x, float y);
-
-	/// Return the distance in the number of ULP between 2 double-precision floating-point scalars.
-	///
-	/// @see gtc_ulp
-	GLM_FUNC_DECL int64 float_distance(double x, double y);
-
-	/// Return the next ULP value(s) after the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x);
-
-	/// Return the value(s) ULP distance after the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x, int ULPs);
-
-	/// Return the value(s) ULP distance after the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> next_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
-
-	/// Return the previous ULP value(s) before the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x);
-
-	/// Return the value(s) ULP distance before the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x, int ULPs);
-
-	/// Return the value(s) ULP distance before the input value(s).
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> prev_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs);
-
-	/// Return the distance in the number of ULP between 2 single-precision floating-point vectors.
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL vec<L, int, Q> float_distance(vec<L, float, Q> const& x, vec<L, float, Q> const& y);
-
-	/// Return the distance in the number of ULP between 2 double-precision floating-point vectors.
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see gtc_ulp
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y);
-
-	/// @}
-}//namespace glm
-
-#include "ulp.inl"
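Concretely, the contract of the declarations above: next_float/prev_float step exactly one representable value up or down, the int-ULPs overloads iterate that step, and float_distance counts the steps between two values. A minimal sketch, not part of the patch, assuming GLM is on the include path:

#include <cassert>
#include <glm/gtc/ulp.hpp>

int main()
{
	float x = 1.0f;
	float up = glm::next_float(x);    // smallest representable float above x
	float down = glm::prev_float(x);  // largest representable float below x

	assert(glm::float_distance(x, up) == 1);     // adjacent values are 1 ULP apart
	assert(glm::float_distance(down, up) == 2);  // one step below plus one step above
	assert(glm::next_float(x, 2) == glm::next_float(up)); // N ULPs == N single steps
	return 0;
}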
diff --git a/third_party/glm/gtc/ulp.inl b/third_party/glm/gtc/ulp.inl
deleted file mode 100755
index 4ecbd3f..0000000
--- a/third_party/glm/gtc/ulp.inl
+++ /dev/null
@@ -1,173 +0,0 @@
-/// @ref gtc_ulp
-
-#include "../ext/scalar_ulp.hpp"
-
-namespace glm
-{
-	template<>
-	GLM_FUNC_QUALIFIER float next_float(float x)
-	{
-#	if GLM_HAS_CXX11_STL
-		return std::nextafter(x, std::numeric_limits<float>::max());
-#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
-		return detail::nextafterf(x, FLT_MAX);
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-		return __builtin_nextafterf(x, FLT_MAX);
-#	else
-		return nextafterf(x, FLT_MAX);
-#	endif
-	}
-
-	template<>
-	GLM_FUNC_QUALIFIER double next_float(double x)
-	{
-#	if GLM_HAS_CXX11_STL
-		return std::nextafter(x, std::numeric_limits<double>::max());
-#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
-		return detail::nextafter(x, std::numeric_limits<double>::max());
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-		return __builtin_nextafter(x, DBL_MAX);
-#	else
-		return nextafter(x, DBL_MAX);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER T next_float(T x, int ULPs)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'next_float' only accept floating-point input");
-		assert(ULPs >= 0);
-
-		T temp = x;
-		for (int i = 0; i < ULPs; ++i)
-			temp = next_float(temp);
-		return temp;
-	}
-
-	GLM_FUNC_QUALIFIER float prev_float(float x)
-	{
-#	if GLM_HAS_CXX11_STL
-		return std::nextafter(x, std::numeric_limits<float>::min());
-#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
-		return detail::nextafterf(x, FLT_MIN);
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-		return __builtin_nextafterf(x, FLT_MIN);
-#	else
-		return nextafterf(x, FLT_MIN);
-#	endif
-	}
-
-	GLM_FUNC_QUALIFIER double prev_float(double x)
-	{
-#	if GLM_HAS_CXX11_STL
-		return std::nextafter(x, std::numeric_limits<double>::min());
-#	elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)))
-		return _nextafter(x, DBL_MIN);
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-		return __builtin_nextafter(x, DBL_MIN);
-#	else
-		return nextafter(x, DBL_MIN);
-#	endif
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER T prev_float(T x, int ULPs)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'prev_float' only accept floating-point input");
-		assert(ULPs >= 0);
-
-		T temp = x;
-		for (int i = 0; i < ULPs; ++i)
-			temp = prev_float(temp);
-		return temp;
-	}
-
-	GLM_FUNC_QUALIFIER int float_distance(float x, float y)
-	{
-		detail::float_t<float> const a(x);
-		detail::float_t<float> const b(y);
-
-		return abs(a.i - b.i);
-	}
-
-	GLM_FUNC_QUALIFIER int64 float_distance(double x, double y)
-	{
-		detail::float_t<double> const a(x);
-		detail::float_t<double> const b(y);
-
-		return abs(a.i - b.i);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = next_float(x[i]);
-		return Result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, int ULPs)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = next_float(x[i], ULPs);
-		return Result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> next_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = next_float(x[i], ULPs[i]);
-		return Result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = prev_float(x[i]);
-		return Result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, int ULPs)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = prev_float(x[i], ULPs);
-		return Result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> prev_float(vec<L, T, Q> const& x, vec<L, int, Q> const& ULPs)
-	{
-		vec<L, T, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = prev_float(x[i], ULPs[i]);
-		return Result;
-	}
-
-	template<length_t L, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, int, Q> float_distance(vec<L, float, Q> const& x, vec<L, float, Q> const& y)
-	{
-		vec<L, int, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = float_distance(x[i], y[i]);
-		return Result;
-	}
-
-	template<length_t L, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, int64, Q> float_distance(vec<L, double, Q> const& x, vec<L, double, Q> const& y)
-	{
-		vec<L, int64, Q> Result;
-		for (length_t i = 0, n = Result.length(); i < n; ++i)
-			Result[i] = float_distance(x[i], y[i]);
-		return Result;
-	}
-}//namespace glm
-
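The scalar float_distance above works on the raw IEEE-754 bit patterns (detail::float_t exposes the integer view): for finite values of the same sign, the difference of the bit patterns equals the number of representable steps between them. A plain C++ sketch of that idea (float_distance_bits is a hypothetical stand-in, not a GLM symbol; it assumes same-signed, non-NaN inputs):

#include <cstdint>
#include <cstdlib>
#include <cstring>

int float_distance_bits(float x, float y)
{
	std::int32_t xi, yi;
	std::memcpy(&xi, &x, sizeof(float)); // type-pun via memcpy, not a cast
	std::memcpy(&yi, &y, sizeof(float));
	return std::abs(xi - yi);            // step count for same-signed finite values
}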
diff --git a/third_party/glm/gtc/vec1.hpp b/third_party/glm/gtc/vec1.hpp
deleted file mode 100755
index c20be87..0000000
--- a/third_party/glm/gtc/vec1.hpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/// @ref gtc_vec1
-/// @file glm/gtc/vec1.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtc_vec1 GLM_GTC_vec1
-/// @ingroup gtc
-///
-/// Include to use the features of this extension.
-///
-/// Add vec1, ivec1, uvec1 and bvec1 types.
-
-#pragma once
-
-// Dependency:
-#include "../ext/vector_bool1.hpp"
-#include "../ext/vector_bool1_precision.hpp"
-#include "../ext/vector_float1.hpp"
-#include "../ext/vector_float1_precision.hpp"
-#include "../ext/vector_double1.hpp"
-#include "../ext/vector_double1_precision.hpp"
-#include "../ext/vector_int1.hpp"
-#include "../ext/vector_int1_precision.hpp"
-#include "../ext/vector_uint1.hpp"
-#include "../ext/vector_uint1_precision.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_GTC_vec1 extension included")
-#endif
-
diff --git a/third_party/glm/gtx/associated_min_max.hpp b/third_party/glm/gtx/associated_min_max.hpp
deleted file mode 100755
index d1a41c0..0000000
--- a/third_party/glm/gtx/associated_min_max.hpp
+++ /dev/null
@@ -1,207 +0,0 @@
-/// @ref gtx_associated_min_max
-/// @file glm/gtx/associated_min_max.hpp
-///
-/// @see core (dependence)
-/// @see gtx_extended_min_max (dependence)
-///
-/// @defgroup gtx_associated_min_max GLM_GTX_associated_min_max
-/// @ingroup gtx
-///
-/// Include to use the features of this extension.
-///
-/// @brief Min and max functions that return associated values, not the compared ones.
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_associated_min_max extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_associated_min_max
-	/// @{
-
-	/// Minimum comparison between 2 variables and returns 2 associated variable values
-	/// @see gtx_associated_min_max
-	template<typename T, typename U>
-	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b);
-
-	/// Minimum comparison between 2 variables and returns 2 associated variable values
-	/// @see gtx_associated_min_max
-	template<length_t L, typename T, typename U, qualifier Q>
-	GLM_FUNC_DECL vec<2, U, Q> associatedMin(
-		vec<L, T, Q> const& x, vec<L, U, Q> const& a,
-		vec<L, T, Q> const& y, vec<L, U, Q> const& b);
-
-	/// Minimum comparison between 2 variables and returns 2 associated variable values
-	/// @see gtx_associated_min_max
-	template<length_t L, typename T, typename U, qualifier Q>
-	GLM_FUNC_DECL vec<L, U, Q> associatedMin(
-		T x, const vec<L, U, Q>& a,
-		T y, const vec<L, U, Q>& b);
-
-	/// Minimum comparison between 2 variables and returns 2 associated variable values
-	/// @see gtx_associated_min_max
-	template<length_t L, typename T, typename U, qualifier Q>
-	GLM_FUNC_DECL vec<L, U, Q> associatedMin(
-		vec<L, T, Q> const& x, U a,
-		vec<L, T, Q> const& y, U b);
-
-	/// Minimum comparison between 3 variables and returns 3 associated variable values
-	/// @see gtx_associated_min_max
-	template<typename T, typename U>
-	GLM_FUNC_DECL U associatedMin(
-		T x, U a,
-		T y, U b,
-		T z, U c);
-
-	/// Minimum comparison between 3 variables and returns 3 associated variable values
-	/// @see gtx_associated_min_max
-	template<length_t L, typename T, typename U, qualifier Q>
-	GLM_FUNC_DECL vec<L, U, Q> associatedMin(
-		vec<L, T, Q> const& x, vec<L, U, Q> const& a,
-		vec<L, T, Q> const& y, vec<L, U, Q> const& b,
-		vec<L, T, Q> const& z, vec<L, U, Q> const& c);
-
-	/// Minimum comparison between 4 variables and returns 4 associated variable values
-	/// @see gtx_associated_min_max
-	template<typename T, typename U>
-	GLM_FUNC_DECL U associatedMin(
-		T x, U a,
-		T y, U b,
-		T z, U c,
-		T w, U d);
-
-	/// Minimum comparison between 4 variables and returns 4 associated variable values
-	/// @see gtx_associated_min_max
-	template<length_t L, typename T, typename U, qualifier Q>
-	GLM_FUNC_DECL vec<L, U, Q> associatedMin(
-		vec<L, T, Q> const& x, vec<L, U, Q> const& a,
-		vec<L, T, Q> const& y, vec<L, U, Q> const& b,
-		vec<L, T, Q> const& z, vec<L, U, Q> const& c,
-		vec<L, T, Q> const& w, vec<L, U, Q> const& d);
-
-	/// Minimum
comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d); - - /// Minimum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec<2, U, Q> associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax( - T x, U a, - T y, U b, - T z, U c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax( - T x, U a, - T y, U b, - T z, U c, - T w, U d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d); - - /// @} -} //namespace glm - -#include "associated_min_max.inl" diff --git a/third_party/glm/gtx/associated_min_max.inl b/third_party/glm/gtx/associated_min_max.inl deleted file mode 100755 index 5186c47..0000000 --- a/third_party/glm/gtx/associated_min_max.inl +++ /dev/null @@ 
-1,354 +0,0 @@ -/// @ref gtx_associated_min_max - -namespace glm{ - -// Min comparison between 2 variables -template -GLM_FUNC_QUALIFIER U associatedMin(T x, U a, T y, U b) -{ - return x < y ? a : b; -} - -template -GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? a[i] : b[i]; - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - T x, const vec& a, - T y, const vec& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x < y ? a[i] : b[i]; - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, U a, - vec const& y, U b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? a : b; - return Result; -} - -// Min comparison between 3 variables -template -GLM_FUNC_QUALIFIER U associatedMin -( - T x, U a, - T y, U b, - T z, U c -) -{ - U Result = x < y ? (x < z ? a : c) : (y < z ? b : c); - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? (x[i] < z[i] ? a[i] : c[i]) : (y[i] < z[i] ? b[i] : c[i]); - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER U associatedMin -( - T x, U a, - T y, U b, - T z, U c, - T w, U d -) -{ - T Test1 = min(x, y); - T Test2 = min(z, w); - U Result1 = x < y ? a : b; - U Result2 = z < w ? c : d; - U Result = Test1 < Test2 ? Result1 : Result2; - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = min(x[i], y[i]); - T Test2 = min(z[i], w[i]); - U Result1 = x[i] < y[i] ? a[i] : b[i]; - U Result2 = z[i] < w[i] ? c[i] : d[i]; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d -) -{ - T Test1 = min(x, y); - T Test2 = min(z, w); - - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - U Result1 = x < y ? a[i] : b[i]; - U Result2 = z < w ? c[i] : d[i]; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = min(x[i], y[i]); - T Test2 = min(z[i], w[i]); - U Result1 = x[i] < y[i] ? a : b; - U Result2 = z[i] < w[i] ? c : d; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER U associatedMax(T x, U a, T y, U b) -{ - return x > y ? 
a : b; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? a[i] : b[i]; - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x > y ? a[i] : b[i]; - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? a : b; - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER U associatedMax -( - T x, U a, - T y, U b, - T z, U c -) -{ - U Result = x > y ? (x > z ? a : c) : (y > z ? b : c); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a[i] : c[i]) : (y[i] > z[i] ? b[i] : c[i]); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x > y ? (x > z ? a[i] : c[i]) : (y > z ? b[i] : c[i]); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a : c) : (y[i] > z[i] ? b : c); - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER U associatedMax -( - T x, U a, - T y, U b, - T z, U c, - T w, U d -) -{ - T Test1 = max(x, y); - T Test2 = max(z, w); - U Result1 = x > y ? a : b; - U Result2 = z > w ? c : d; - U Result = Test1 > Test2 ? Result1 : Result2; - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = max(x[i], y[i]); - T Test2 = max(z[i], w[i]); - U Result1 = x[i] > y[i] ? a[i] : b[i]; - U Result2 = z[i] > w[i] ? c[i] : d[i]; - Result[i] = Test1 > Test2 ? Result1 : Result2; - } - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d -) -{ - T Test1 = max(x, y); - T Test2 = max(z, w); - - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - U Result1 = x > y ? a[i] : b[i]; - U Result2 = z > w ? c[i] : d[i]; - Result[i] = Test1 > Test2 ? 
Result1 : Result2; - } - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = max(x[i], y[i]); - T Test2 = max(z[i], w[i]); - U Result1 = x[i] > y[i] ? a : b; - U Result2 = z[i] > w[i] ? c : d; - Result[i] = Test1 > Test2 ? Result1 : Result2; - } - return Result; -} -}//namespace glm diff --git a/third_party/glm/gtx/bit.hpp b/third_party/glm/gtx/bit.hpp deleted file mode 100755 index 60a7aef..0000000 --- a/third_party/glm/gtx/bit.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/// @ref gtx_bit -/// @file glm/gtx/bit.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_bit GLM_GTX_bit -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../gtc/bitfield.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_bit extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_bit - /// @{ - - /// @see gtx_bit - template - GLM_FUNC_DECL genIUType highestBitValue(genIUType Value); - - /// @see gtx_bit - template - GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value); - - /// Find the highest bit set to 1 in a integer variable and return its value. - /// - /// @see gtx_bit - template - GLM_FUNC_DECL vec highestBitValue(vec const& value); - - /// Return the power of two number which value is just higher the input value. - /// Deprecated, use ceilPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value); - - /// Return the power of two number which value is just higher the input value. - /// Deprecated, use ceilPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoAbove(vec const& value); - - /// Return the power of two number which value is just lower the input value. - /// Deprecated, use floorPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value); - - /// Return the power of two number which value is just lower the input value. - /// Deprecated, use floorPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoBelow(vec const& value); - - /// Return the power of two number which value is the closet to the input value. - /// Deprecated, use roundPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value); - - /// Return the power of two number which value is the closet to the input value. 
-	/// Deprecated, use roundPowerOfTwo from GTC_round instead
-	///
-	/// @see gtc_round
-	/// @see gtx_bit
-	template<length_t L, typename T, qualifier Q>
-	GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& value);
-
-	/// @}
-} //namespace glm
-
-
-#include "bit.inl"
-
diff --git a/third_party/glm/gtx/bit.inl b/third_party/glm/gtx/bit.inl
deleted file mode 100755
index 621b626..0000000
--- a/third_party/glm/gtx/bit.inl
+++ /dev/null
@@ -1,92 +0,0 @@
-/// @ref gtx_bit
-
-namespace glm
-{
-	///////////////////
-	// highestBitValue
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value)
-	{
-		genIUType tmp = Value;
-		genIUType result = genIUType(0);
-		while(tmp)
-		{
-			result = (tmp & (~tmp + 1)); // grab lowest bit
-			tmp &= ~result; // clear lowest bit
-		}
-		return result;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> highestBitValue(vec<L, T, Q> const& v)
-	{
-		return detail::functor1<vec, L, T, T, Q>::call(highestBitValue, v);
-	}
-
-	///////////////////
-	// lowestBitValue
-
-	template<typename genIUType>
-	GLM_FUNC_QUALIFIER genIUType lowestBitValue(genIUType Value)
-	{
-		return (Value & (~Value + 1));
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> lowestBitValue(vec<L, T, Q> const& v)
-	{
-		return detail::functor1<vec, L, T, T, Q>::call(lowestBitValue, v);
-	}
-
-	///////////////////
-	// powerOfTwoAbove
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType powerOfTwoAbove(genType value)
-	{
-		return isPowerOfTwo(value) ? value : highestBitValue(value) << 1;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoAbove(vec<L, T, Q> const& v)
-	{
-		return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoAbove, v);
-	}
-
-	///////////////////
-	// powerOfTwoBelow
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType powerOfTwoBelow(genType value)
-	{
-		return isPowerOfTwo(value) ? value : highestBitValue(value);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& v)
-	{
-		return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoBelow, v);
-	}
-
-	/////////////////////
-	// powerOfTwoNearest
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType powerOfTwoNearest(genType value)
-	{
-		if(isPowerOfTwo(value))
-			return value;
-
-		genType const prev = highestBitValue(value);
-		genType const next = prev << 1;
-		return (next - value) < (value - prev) ? next : prev;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& v)
-	{
-		return detail::functor1<vec, L, T, T, Q>::call(powerOfTwoNearest, v);
-	}
-
-}//namespace glm
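The two primitives above are standard two's-complement tricks: v & (~v + 1) isolates the lowest set bit, and repeatedly clearing that bit leaves only the highest one. A standalone sketch with hypothetical names (lowest_bit/highest_bit are illustrative stand-ins, not GLM symbols):

#include <cassert>
#include <cstdint>

std::uint32_t lowest_bit(std::uint32_t v)  { return v & (~v + 1u); } // lowestBitValue
std::uint32_t highest_bit(std::uint32_t v)                           // highestBitValue
{
	std::uint32_t r = 0;
	while(v) { r = v & (~v + 1u); v &= ~r; } // keep clearing the low bit
	return r;
}

int main()
{
	assert(lowest_bit(0b1011000u)  == 0b0001000u);
	assert(highest_bit(0b1011000u) == 0b1000000u);
	return 0;
}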
diff --git a/third_party/glm/gtx/closest_point.hpp b/third_party/glm/gtx/closest_point.hpp
deleted file mode 100755
index de6dbbf..0000000
--- a/third_party/glm/gtx/closest_point.hpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/// @ref gtx_closest_point
-/// @file glm/gtx/closest_point.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtx_closest_point GLM_GTX_closest_point
-/// @ingroup gtx
-///
-/// Include to use the features of this extension.
-///
-/// Find the point on a straight line which is the closest to a point.
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_closest_point extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_closest_point
-	/// @{
-
-	/// Find the point on a straight line which is the closest to a point.
-	/// @see gtx_closest_point
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine(
-		vec<3, T, Q> const& point,
-		vec<3, T, Q> const& a,
-		vec<3, T, Q> const& b);
-
-	/// 2d lines work as well
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine(
-		vec<2, T, Q> const& point,
-		vec<2, T, Q> const& a,
-		vec<2, T, Q> const& b);
-
-	/// @}
-}// namespace glm
-
-#include "closest_point.inl"
diff --git a/third_party/glm/gtx/closest_point.inl b/third_party/glm/gtx/closest_point.inl
deleted file mode 100755
index 0a39b04..0000000
--- a/third_party/glm/gtx/closest_point.inl
+++ /dev/null
@@ -1,45 +0,0 @@
-/// @ref gtx_closest_point
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> closestPointOnLine
-	(
-		vec<3, T, Q> const& point,
-		vec<3, T, Q> const& a,
-		vec<3, T, Q> const& b
-	)
-	{
-		T LineLength = distance(a, b);
-		vec<3, T, Q> Vector = point - a;
-		vec<3, T, Q> LineDirection = (b - a) / LineLength;
-
-		// Project Vector onto LineDirection to get the distance of point from a
-		T Distance = dot(Vector, LineDirection);
-
-		if(Distance <= T(0)) return a;
-		if(Distance >= LineLength) return b;
-		return a + LineDirection * Distance;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<2, T, Q> closestPointOnLine
-	(
-		vec<2, T, Q> const& point,
-		vec<2, T, Q> const& a,
-		vec<2, T, Q> const& b
-	)
-	{
-		T LineLength = distance(a, b);
-		vec<2, T, Q> Vector = point - a;
-		vec<2, T, Q> LineDirection = (b - a) / LineLength;
-
-		// Project Vector onto LineDirection to get the distance of point from a
-		T Distance = dot(Vector, LineDirection);
-
-		if(Distance <= T(0)) return a;
-		if(Distance >= LineLength) return b;
-		return a + LineDirection * Distance;
-	}
-
-}//namespace glm
diff --git a/third_party/glm/gtx/color_encoding.hpp b/third_party/glm/gtx/color_encoding.hpp
deleted file mode 100755
index 96ded2a..0000000
--- a/third_party/glm/gtx/color_encoding.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/// @ref gtx_color_encoding
-/// @file glm/gtx/color_encoding.hpp
-///
-/// @see core (dependence)
-/// @see gtx_color_encoding (dependence)
-///
-/// @defgroup gtx_color_encoding GLM_GTX_color_encoding
-/// @ingroup gtx
-///
-/// Include to use the features of this extension.
-///
-/// @brief Allow to perform conversions between linear sRGB and CIE XYZ color spaces
-
-#pragma once
-
-// Dependencies
-#include "../detail/setup.hpp"
-#include "../detail/qualifier.hpp"
-#include "../vec3.hpp"
-#include <limits>
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_color_encoding extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_color_encoding
-	/// @{
-
-	/// Convert a linear sRGB color to D65 XYZ.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB);
-
-	/// Convert a linear sRGB color to D50 XYZ.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB);
-
-	/// Convert a D65 XYZ color to linear sRGB.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ);
-
-	/// Convert a D65 XYZ color to D50 XYZ.
- template - GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ); - - /// @} -} //namespace glm - -#include "color_encoding.inl" diff --git a/third_party/glm/gtx/color_encoding.inl b/third_party/glm/gtx/color_encoding.inl deleted file mode 100755 index e50fa3e..0000000 --- a/third_party/glm/gtx/color_encoding.inl +++ /dev/null @@ -1,45 +0,0 @@ -/// @ref gtx_color_encoding - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB) - { - vec<3, T, Q> const M(0.490f, 0.17697f, 0.2f); - vec<3, T, Q> const N(0.31f, 0.8124f, 0.01063f); - vec<3, T, Q> const O(0.490f, 0.01f, 0.99f); - - return (M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB) * static_cast(5.650675255693055f); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB) - { - vec<3, T, Q> const M(0.436030342570117f, 0.222438466210245f, 0.013897440074263f); - vec<3, T, Q> const N(0.385101860087134f, 0.716942745571917f, 0.097076381494207f); - vec<3, T, Q> const O(0.143067806654203f, 0.060618777416563f, 0.713926257896652f); - - return M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ) - { - vec<3, T, Q> const M(0.41847f, -0.091169f, 0.0009209f); - vec<3, T, Q> const N(-0.15866f, 0.25243f, 0.015708f); - vec<3, T, Q> const O(0.0009209f, -0.0025498f, 0.1786f); - - return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ) - { - vec<3, T, Q> const M(+1.047844353856414f, +0.029549007606644f, -0.009250984365223f); - vec<3, T, Q> const N(+0.022898981050086f, +0.990508028941971f, +0.015072338237051f); - vec<3, T, Q> const O(-0.050206647741605f, -0.017074711360960f, +0.751717835079977f); - - return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; - } - -}//namespace glm diff --git a/third_party/glm/gtx/color_space.hpp b/third_party/glm/gtx/color_space.hpp deleted file mode 100755 index a634392..0000000 --- a/third_party/glm/gtx/color_space.hpp +++ /dev/null @@ -1,72 +0,0 @@ -/// @ref gtx_color_space -/// @file glm/gtx/color_space.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_color_space GLM_GTX_color_space -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Related to RGB to HSV conversions and operations. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_color_space extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_color_space - /// @{ - - /// Converts a color from HSV color space to its color in RGB color space. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> rgbColor( - vec<3, T, Q> const& hsvValue); - - /// Converts a color from RGB color space to its color in HSV color space. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> hsvColor( - vec<3, T, Q> const& rgbValue); - - /// Build a saturation matrix. 
- /// @see gtx_color_space - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation( - T const s); - - /// Modify the saturation of a color. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> saturation( - T const s, - vec<3, T, Q> const& color); - - /// Modify the saturation of a color. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<4, T, Q> saturation( - T const s, - vec<4, T, Q> const& color); - - /// Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB canals. - /// @see gtx_color_space - template - GLM_FUNC_DECL T luminosity( - vec<3, T, Q> const& color); - - /// @} -}//namespace glm - -#include "color_space.inl" diff --git a/third_party/glm/gtx/color_space.inl b/third_party/glm/gtx/color_space.inl deleted file mode 100755 index f698afe..0000000 --- a/third_party/glm/gtx/color_space.inl +++ /dev/null @@ -1,141 +0,0 @@ -/// @ref gtx_color_space - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgbColor(const vec<3, T, Q>& hsvColor) - { - vec<3, T, Q> hsv = hsvColor; - vec<3, T, Q> rgbColor; - - if(hsv.y == static_cast(0)) - // achromatic (grey) - rgbColor = vec<3, T, Q>(hsv.z); - else - { - T sector = floor(hsv.x * (T(1) / T(60))); - T frac = (hsv.x * (T(1) / T(60))) - sector; - // factorial part of h - T o = hsv.z * (T(1) - hsv.y); - T p = hsv.z * (T(1) - hsv.y * frac); - T q = hsv.z * (T(1) - hsv.y * (T(1) - frac)); - - switch(int(sector)) - { - default: - case 0: - rgbColor.r = hsv.z; - rgbColor.g = q; - rgbColor.b = o; - break; - case 1: - rgbColor.r = p; - rgbColor.g = hsv.z; - rgbColor.b = o; - break; - case 2: - rgbColor.r = o; - rgbColor.g = hsv.z; - rgbColor.b = q; - break; - case 3: - rgbColor.r = o; - rgbColor.g = p; - rgbColor.b = hsv.z; - break; - case 4: - rgbColor.r = q; - rgbColor.g = o; - rgbColor.b = hsv.z; - break; - case 5: - rgbColor.r = hsv.z; - rgbColor.g = o; - rgbColor.b = p; - break; - } - } - - return rgbColor; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> hsvColor(const vec<3, T, Q>& rgbColor) - { - vec<3, T, Q> hsv = rgbColor; - float Min = min(min(rgbColor.r, rgbColor.g), rgbColor.b); - float Max = max(max(rgbColor.r, rgbColor.g), rgbColor.b); - float Delta = Max - Min; - - hsv.z = Max; - - if(Max != static_cast(0)) - { - hsv.y = Delta / hsv.z; - T h = static_cast(0); - - if(rgbColor.r == Max) - // between yellow & magenta - h = static_cast(0) + T(60) * (rgbColor.g - rgbColor.b) / Delta; - else if(rgbColor.g == Max) - // between cyan & yellow - h = static_cast(120) + T(60) * (rgbColor.b - rgbColor.r) / Delta; - else - // between magenta & cyan - h = static_cast(240) + T(60) * (rgbColor.r - rgbColor.g) / Delta; - - if(h < T(0)) - hsv.x = h + T(360); - else - hsv.x = h; - } - else - { - // If r = g = b = 0 then s = 0, h is undefined - hsv.y = static_cast(0); - hsv.x = static_cast(0); - } - - return hsv; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> saturation(T const s) - { - vec<3, T, defaultp> rgbw = vec<3, T, defaultp>(T(0.2126), T(0.7152), T(0.0722)); - - vec<3, T, defaultp> const col((T(1) - s) * rgbw); - - mat<4, 4, T, defaultp> result(T(1)); - result[0][0] = col.x + s; - result[0][1] = col.x; - result[0][2] = col.x; - result[1][0] = col.y; - result[1][1] = col.y + s; - result[1][2] = col.y; - result[2][0] = col.z; - result[2][1] = col.z; - result[2][2] = col.z + s; - - return result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> saturation(const T s, const vec<3, T, Q>& color) - { - return vec<3, T, Q>(saturation(s) * vec<4, T, Q>(color, T(0))); - } - - template - 
GLM_FUNC_QUALIFIER vec<4, T, Q> saturation(const T s, const vec<4, T, Q>& color) - { - return saturation(s) * color; - } - - template - GLM_FUNC_QUALIFIER T luminosity(const vec<3, T, Q>& color) - { - const vec<3, T, Q> tmp = vec<3, T, Q>(0.33, 0.59, 0.11); - return dot(color, tmp); - } -}//namespace glm diff --git a/third_party/glm/gtx/color_space_YCoCg.hpp b/third_party/glm/gtx/color_space_YCoCg.hpp deleted file mode 100755 index dd2b771..0000000 --- a/third_party/glm/gtx/color_space_YCoCg.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_color_space_YCoCg -/// @file glm/gtx/color_space_YCoCg.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_color_space_YCoCg GLM_GTX_color_space_YCoCg -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// RGB to YCoCg conversions and operations - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_color_space_YCoCg extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_color_space_YCoCg - /// @{ - - /// Convert a color from RGB color space to YCoCg color space. - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg( - vec<3, T, Q> const& rgbColor); - - /// Convert a color from YCoCg color space to RGB color space. - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb( - vec<3, T, Q> const& YCoCgColor); - - /// Convert a color from RGB color space to YCoCgR color space. - /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR( - vec<3, T, Q> const& rgbColor); - - /// Convert a color from YCoCgR color space to RGB color space. 
- /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb( - vec<3, T, Q> const& YCoCgColor); - - /// @} -}//namespace glm - -#include "color_space_YCoCg.inl" diff --git a/third_party/glm/gtx/color_space_YCoCg.inl b/third_party/glm/gtx/color_space_YCoCg.inl deleted file mode 100755 index 83ba857..0000000 --- a/third_party/glm/gtx/color_space_YCoCg.inl +++ /dev/null @@ -1,107 +0,0 @@ -/// @ref gtx_color_space_YCoCg - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCg - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.x/*Y */ = rgbColor.r / T(4) + rgbColor.g / T(2) + rgbColor.b / T(4); - result.y/*Co*/ = rgbColor.r / T(2) + rgbColor.g * T(0) - rgbColor.b / T(2); - result.z/*Cg*/ = - rgbColor.r / T(4) + rgbColor.g / T(2) - rgbColor.b / T(4); - return result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCg2rgb - ( - vec<3, T, Q> const& YCoCgColor - ) - { - vec<3, T, Q> result; - result.r = YCoCgColor.x + YCoCgColor.y - YCoCgColor.z; - result.g = YCoCgColor.x + YCoCgColor.z; - result.b = YCoCgColor.x - YCoCgColor.y - YCoCgColor.z; - return result; - } - - template - class compute_YCoCgR { - public: - static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.x/*Y */ = rgbColor.g * static_cast(0.5) + (rgbColor.r + rgbColor.b) * static_cast(0.25); - result.y/*Co*/ = rgbColor.r - rgbColor.b; - result.z/*Cg*/ = rgbColor.g - (rgbColor.r + rgbColor.b) * static_cast(0.5); - return result; - } - - static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - vec<3, T, Q> result; - T tmp = YCoCgRColor.x - (YCoCgRColor.z * static_cast(0.5)); - result.g = YCoCgRColor.z + tmp; - result.b = tmp - (YCoCgRColor.y * static_cast(0.5)); - result.r = result.b + YCoCgRColor.y; - return result; - } - }; - - template - class compute_YCoCgR { - public: - static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.y/*Co*/ = rgbColor.r - rgbColor.b; - T tmp = rgbColor.b + (result.y >> 1); - result.z/*Cg*/ = rgbColor.g - tmp; - result.x/*Y */ = tmp + (result.z >> 1); - return result; - } - - static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - vec<3, T, Q> result; - T tmp = YCoCgRColor.x - (YCoCgRColor.z >> 1); - result.g = YCoCgRColor.z + tmp; - result.b = tmp - (YCoCgRColor.y >> 1); - result.r = result.b + YCoCgRColor.y; - return result; - } - }; - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - return compute_YCoCgR::is_integer>::rgb2YCoCgR(rgbColor); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - return compute_YCoCgR::is_integer>::YCoCgR2rgb(YCoCgRColor); - } -}//namespace glm diff --git a/third_party/glm/gtx/common.hpp b/third_party/glm/gtx/common.hpp deleted file mode 100755 index 254ada2..0000000 --- a/third_party/glm/gtx/common.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/// @ref gtx_common -/// @file glm/gtx/common.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_common GLM_GTX_common -/// @ingroup gtx -/// -/// Include to use the features of this extension. 
-///
-/// @brief Provide functions to increase the compatibility with Cg and HLSL languages
-
-#pragma once
-
-// Dependencies:
-#include "../vec2.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-#include "../gtc/vec1.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_common extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_common
-	/// @{
-
-	/// Returns true if x is a denormalized number
-	/// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format.
-	/// This format is less precise but can represent values closer to zero.
-	///
-	/// @tparam genType Floating-point scalar or vector types.
-	///
-	/// @see GLSL isnan man page
-	/// @see GLSL 4.20.8 specification, section 8.3 Common Functions
-	template<typename genType>
-	GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x);
-
-	/// Similar to 'mod' but with a different rounding and integer support.
-	/// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)'
-	///
-	/// @see GLSL mod vs HLSL fmod
-	/// @see GLSL mod man page
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> fmod(vec<L, T, Q> const& v);
-
-	/// Returns whether vector components values are within an interval. An open interval excludes its endpoints, and is denoted with parentheses.
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point or integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_vector_relational
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
-
-	/// Returns whether vector components values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets.
-	///
-	/// @tparam L Integer between 1 and 4 included that qualifies the dimension of the vector
-	/// @tparam T Floating-point or integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see ext_vector_relational
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
-
-	/// @}
-}//namespace glm
-
-#include "common.inl"
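Usage of the two interval tests declared above is component-wise, per the implementation that follows. A minimal sketch, not part of the patch, assuming GLM with GLM_ENABLE_EXPERIMENTAL defined (gtx/common.hpp is an experimental extension):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/common.hpp>

glm::vec2 const v(0.0f, 0.5f);
glm::vec2 const lo(0.0f, 0.0f);
glm::vec2 const hi(1.0f, 1.0f);

// Endpoints excluded vs. included, tested per component:
glm::bvec2 const open   = glm::openBounded(v, lo, hi);  // (false, true): 0.0 is an endpoint
glm::bvec2 const closed = glm::closeBounded(v, lo, hi); // (true,  true)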
diff --git a/third_party/glm/gtx/common.inl b/third_party/glm/gtx/common.inl
deleted file mode 100755
index 4ad2126..0000000
--- a/third_party/glm/gtx/common.inl
+++ /dev/null
@@ -1,125 +0,0 @@
-/// @ref gtx_common
-
-#include <cmath>
-#include "../gtc/epsilon.hpp"
-#include "../gtc/constants.hpp"
-
-namespace glm{
-namespace detail
-{
-	template<length_t L, typename T, qualifier Q, bool isFloat>
-	struct compute_fmod
-	{
-		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
-		{
-			return detail::functor2<vec, L, T, Q>::call(std::fmod, a, b);
-		}
-	};
-
-	template<length_t L, typename T, qualifier Q>
-	struct compute_fmod<L, T, Q, false>
-	{
-		GLM_FUNC_QUALIFIER static vec<L, T, Q> call(vec<L, T, Q> const& a, vec<L, T, Q> const& b)
-		{
-			return a % b;
-		}
-	};
-}//namespace detail
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER bool isdenormal(T const& x)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
-
-#	if GLM_HAS_CXX11_STL
-		return std::fpclassify(x) == FP_SUBNORMAL;
-#	else
-		return epsilonNotEqual(x, static_cast<T>(0), epsilon<T>()) && std::fabs(x) < std::numeric_limits<T>::min();
-#	endif
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename vec<1, T, Q>::bool_type isdenormal
-	(
-		vec<1, T, Q> const& x
-	)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
-
-		return typename vec<1, T, Q>::bool_type(
-			isdenormal(x.x));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename vec<2, T, Q>::bool_type isdenormal
-	(
-		vec<2, T, Q> const& x
-	)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
-
-		return typename vec<2, T, Q>::bool_type(
-			isdenormal(x.x),
-			isdenormal(x.y));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename vec<3, T, Q>::bool_type isdenormal
-	(
-		vec<3, T, Q> const& x
-	)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
-
-		return typename vec<3, T, Q>::bool_type(
-			isdenormal(x.x),
-			isdenormal(x.y),
-			isdenormal(x.z));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER typename vec<4, T, Q>::bool_type isdenormal
-	(
-		vec<4, T, Q> const& x
-	)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isdenormal' only accept floating-point inputs");
-
-		return typename vec<4, T, Q>::bool_type(
-			isdenormal(x.x),
-			isdenormal(x.y),
-			isdenormal(x.z),
-			isdenormal(x.w));
-	}
-
-	// fmod
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType fmod(genType x, genType y)
-	{
-		return fmod(vec<1, genType>(x), y).x;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> fmod(vec<L, T, Q> const& x, T y)
-	{
-		return detail::compute_fmod<L, T, Q, std::numeric_limits<T>::is_iec559>::call(x, vec<L, T, Q>(y));
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> fmod(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
-	{
-		return detail::compute_fmod<L, T, Q, std::numeric_limits<T>::is_iec559>::call(x, y);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
-	{
-		return greaterThan(Value, Min) && lessThan(Value, Max);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max)
-	{
-		return greaterThanEqual(Value, Min) && lessThanEqual(Value, Max);
-	}
-}//namespace glm
diff --git a/third_party/glm/gtx/compatibility.hpp b/third_party/glm/gtx/compatibility.hpp
deleted file mode 100755
index f1b00a6..0000000
---
a/third_party/glm/gtx/compatibility.hpp +++ /dev/null @@ -1,133 +0,0 @@ -/// @ref gtx_compatibility -/// @file glm/gtx/compatibility.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_compatibility GLM_GTX_compatibility -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Provide functions to increase the compatibility with Cg and HLSL languages - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_compatibility extension included") -# endif -#endif - -#if GLM_COMPILER & GLM_COMPILER_VC -# include -#elif GLM_COMPILER & GLM_COMPILER_GCC -# include -# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID) -# undef isfinite -# endif -#endif//GLM_COMPILER - -namespace glm -{ - /// @addtogroup gtx_compatibility - /// @{ - - template GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. 
(From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER T atan2(T x, T y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& x, const vec<2, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& x, const vec<3, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& x, const vec<4, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - - template GLM_FUNC_DECL bool isfinite(genType const& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - - typedef bool bool1; //!< \brief boolean type with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, bool, highp> bool2; //!< \brief boolean type with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, bool, highp> bool3; //!< \brief boolean type with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, bool, highp> bool4; //!< \brief boolean type with 4 components. 
(From GLM_GTX_compatibility extension) - - typedef bool bool1x1; //!< \brief boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, bool, highp> bool2x2; //!< \brief boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, bool, highp> bool2x3; //!< \brief boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, bool, highp> bool2x4; //!< \brief boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, bool, highp> bool3x2; //!< \brief boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, bool, highp> bool3x3; //!< \brief boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, bool, highp> bool3x4; //!< \brief boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, bool, highp> bool4x2; //!< \brief boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, bool, highp> bool4x3; //!< \brief boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, bool, highp> bool4x4; //!< \brief boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef int int1; //!< \brief integer vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, int, highp> int2; //!< \brief integer vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, int, highp> int3; //!< \brief integer vector with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, int, highp> int4; //!< \brief integer vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef int int1x1; //!< \brief integer matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, int, highp> int2x2; //!< \brief integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, int, highp> int2x3; //!< \brief integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, int, highp> int2x4; //!< \brief integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, int, highp> int3x2; //!< \brief integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, int, highp> int3x3; //!< \brief integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, int, highp> int3x4; //!< \brief integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, int, highp> int4x2; //!< \brief integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, int, highp> int4x3; //!< \brief integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, int, highp> int4x4; //!< \brief integer matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef float float1; //!< \brief single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, float, highp> float2; //!< \brief single-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, float, highp> float3; //!< \brief single-qualifier floating-point vector with 3 components. 
(From GLM_GTX_compatibility extension) - typedef vec<4, float, highp> float4; //!< \brief single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef float float1x1; //!< \brief single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, float, highp> float2x2; //!< \brief single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, float, highp> float2x3; //!< \brief single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, float, highp> float2x4; //!< \brief single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, float, highp> float3x2; //!< \brief single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, float, highp> float3x3; //!< \brief single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, float, highp> float3x4; //!< \brief single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, float, highp> float4x2; //!< \brief single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, float, highp> float4x3; //!< \brief single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, float, highp> float4x4; //!< \brief single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef double double1; //!< \brief double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, double, highp> double2; //!< \brief double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, double, highp> double3; //!< \brief double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, double, highp> double4; //!< \brief double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef double double1x1; //!< \brief double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, double, highp> double2x2; //!< \brief double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, double, highp> double2x3; //!< \brief double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, double, highp> double2x4; //!< \brief double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, double, highp> double3x2; //!< \brief double-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, double, highp> double3x3; //!< \brief double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, double, highp> double3x4; //!< \brief double-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, double, highp> double4x2; //!< \brief double-qualifier floating-point matrix with 4 x 2 components. 
(From GLM_GTX_compatibility extension) - typedef mat<4, 3, double, highp> double4x3; //!< \brief double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, double, highp> double4x4; //!< \brief double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - /// @} -}//namespace glm - -#include "compatibility.inl" diff --git a/third_party/glm/gtx/compatibility.inl b/third_party/glm/gtx/compatibility.inl deleted file mode 100755 index 1d49496..0000000 --- a/third_party/glm/gtx/compatibility.inl +++ /dev/null @@ -1,62 +0,0 @@ -#include - -namespace glm -{ - // isfinite - template - GLM_FUNC_QUALIFIER bool isfinite( - genType const& x) - { -# if GLM_HAS_CXX11_STL - return std::isfinite(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_VC - return _finite(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_GCC && GLM_PLATFORM & GLM_PLATFORM_ANDROID - return _isfinite(x) != 0; -# else - if (std::numeric_limits::is_integer || std::denorm_absent == std::numeric_limits::has_denorm) - return std::numeric_limits::min() <= x && std::numeric_limits::max() >= x; - else - return -std::numeric_limits::max() <= x && std::numeric_limits::max() >= x; -# endif - } - - template - GLM_FUNC_QUALIFIER vec<1, bool, Q> isfinite( - vec<1, T, Q> const& x) - { - return vec<1, bool, Q>( - isfinite(x.x)); - } - - template - GLM_FUNC_QUALIFIER vec<2, bool, Q> isfinite( - vec<2, T, Q> const& x) - { - return vec<2, bool, Q>( - isfinite(x.x), - isfinite(x.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, bool, Q> isfinite( - vec<3, T, Q> const& x) - { - return vec<3, bool, Q>( - isfinite(x.x), - isfinite(x.y), - isfinite(x.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isfinite( - vec<4, T, Q> const& x) - { - return vec<4, bool, Q>( - isfinite(x.x), - isfinite(x.y), - isfinite(x.z), - isfinite(x.w)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/component_wise.hpp b/third_party/glm/gtx/component_wise.hpp deleted file mode 100755 index 34a2b0a..0000000 --- a/third_party/glm/gtx/component_wise.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/// @ref gtx_component_wise -/// @file glm/gtx/component_wise.hpp -/// @date 2007-05-21 / 2011-06-07 -/// @author Christophe Riccio -/// -/// @see core (dependence) -/// -/// @defgroup gtx_component_wise GLM_GTX_component_wise -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Operations between components of a type - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_component_wise extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_component_wise - /// @{ - - /// Convert an integer vector to a normalized float vector. - /// If the parameter value type is already a floating qualifier type, the value is passed through. - /// @see gtx_component_wise - template - GLM_FUNC_DECL vec compNormalize(vec const& v); - - /// Convert a normalized float vector to an integer vector. - /// If the parameter value type is already a floating qualifier type, the value is passed through. 
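For reference, the Cg/HLSL-style helpers from the GLM_GTX_compatibility extension removed above were used like this. A minimal sketch under the same assumptions as the previous example; the results in comments follow from the definitions in that hunk.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/compatibility.hpp>

    int main()
    {
        // The HLSL-style typedefs map straight onto glm vector types.
        glm::float3 const a(0.0f, 1.0f, 2.0f);
        glm::float3 const b(1.0f, 3.0f, 4.0f);

        // lerp is mix under its Cg/HLSL name; the blend factor is not clamped to [0, 1].
        glm::float3 const mid = glm::lerp(a, b, 0.5f); // (0.5, 2.0, 3.0)

        // saturate clamps each component to [0, 1].
        glm::float3 const s = glm::saturate(glm::float3(-0.5f, 0.25f, 2.0f)); // (0.0, 0.25, 1.0)

        // Two-argument arc tangent and finiteness test under their HLSL spellings.
        float const angle = glm::atan2(1.0f, 1.0f); // pi/4
        bool const ok = glm::isfinite(angle);       // true

        return (ok && s.z == 1.0f && mid.x == 0.5f) ? 0 : 1;
    }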
- /// @see gtx_component_wise - template - GLM_FUNC_DECL vec compScale(vec const& v); - - /// Add all vector components together. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v); - - /// Multiply all vector components together. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMul(genType const& v); - - /// Find the minimum value between single vector components. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMin(genType const& v); - - /// Find the maximum value between single vector components. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMax(genType const& v); - - /// @} -}//namespace glm - -#include "component_wise.inl" diff --git a/third_party/glm/gtx/component_wise.inl b/third_party/glm/gtx/component_wise.inl deleted file mode 100755 index cbbc7d4..0000000 --- a/third_party/glm/gtx/component_wise.inl +++ /dev/null @@ -1,127 +0,0 @@ -/// @ref gtx_component_wise - -#include - -namespace glm{ -namespace detail -{ - template - struct compute_compNormalize - {}; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - floatType const Min = static_cast(std::numeric_limits::min()); - floatType const Max = static_cast(std::numeric_limits::max()); - return (vec(v) - Min) / (Max - Min) * static_cast(2) - static_cast(1); - } - }; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return vec(v) / static_cast(std::numeric_limits::max()); - } - }; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return v; - } - }; - - template - struct compute_compScale - {}; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - floatType const Max = static_cast(std::numeric_limits::max()) + static_cast(0.5); - vec const Scaled(v * Max); - vec const Result(Scaled - static_cast(0.5)); - return Result; - } - }; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return vec(vec(v) * static_cast(std::numeric_limits::max())); - } - }; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return v; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER vec compNormalize(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compNormalize' accepts only floating-point types for 'floatType' template parameter"); - - return detail::compute_compNormalize::is_integer, std::numeric_limits::is_signed>::call(v); - } - - template - GLM_FUNC_QUALIFIER vec compScale(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compScale' accepts only floating-point types for 'floatType' template parameter"); - - return detail::compute_compScale::is_integer, std::numeric_limits::is_signed>::call(v); - } - - template - GLM_FUNC_QUALIFIER T compAdd(vec const& v) - { - T Result(0); - for(length_t i = 0, n = v.length(); i < n; ++i) - Result += v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER T compMul(vec const& v) - { - T Result(1); - for(length_t i = 0, n = v.length(); i < n; ++i) - Result *= v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER T compMin(vec const& v) - { - T Result(v[0]); - for(length_t i = 1, n = v.length(); i < n; ++i) - Result = min(Result, v[i]); - return Result; - } - - 
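The GLM_GTX_component_wise reductions (compAdd, compMul, compMin above, compMax just below) collapse a vector into one scalar, and compNormalize remaps integer storage onto floats. A minimal usage sketch, same assumptions as the earlier examples:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/component_wise.hpp>

    int main()
    {
        glm::vec4 const v(1.0f, 2.0f, 3.0f, 4.0f);

        float const sum  = glm::compAdd(v); // 1 + 2 + 3 + 4 = 10
        float const prod = glm::compMul(v); // 1 * 2 * 3 * 4 = 24
        float const lo   = glm::compMin(v); // 1
        float const hi   = glm::compMax(v); // 4

        // compNormalize<float> maps unsigned 8-bit storage onto [0, 1]
        // (signed integers map onto [-1, 1]; floats pass through unchanged).
        glm::vec<3, unsigned char> const rgb(0, 128, 255);
        glm::vec3 const unit = glm::compNormalize<float>(rgb); // ~(0.0, 0.502, 1.0)

        return (sum == 10.0f && prod == 24.0f && lo == 1.0f && hi == 4.0f && unit.z == 1.0f) ? 0 : 1;
    }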
template - GLM_FUNC_QUALIFIER T compMax(vec const& v) - { - T Result(v[0]); - for(length_t i = 1, n = v.length(); i < n; ++i) - Result = max(Result, v[i]); - return Result; - } -}//namespace glm diff --git a/third_party/glm/gtx/dual_quaternion.hpp b/third_party/glm/gtx/dual_quaternion.hpp deleted file mode 100755 index 6a51ab7..0000000 --- a/third_party/glm/gtx/dual_quaternion.hpp +++ /dev/null @@ -1,274 +0,0 @@ -/// @ref gtx_dual_quaternion -/// @file glm/gtx/dual_quaternion.hpp -/// @author Maksim Vorobiev (msomeone@gmail.com) -/// -/// @see core (dependence) -/// @see gtc_constants (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_dual_quaternion GLM_GTX_dual_quaternion -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines a templated dual-quaternion type and several dual-quaternion operations. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_dual_quaternion extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_dual_quaternion - /// @{ - - template - struct tdualquat - { - // -- Implementation detail -- - - typedef T value_type; - typedef qua part_type; - - // -- Data -- - - qua real, dual; - - // -- Component accesses -- - - typedef length_t length_type; - /// Return the count of components of a dual quaternion - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} - - GLM_FUNC_DECL part_type & operator[](length_type i); - GLM_FUNC_DECL part_type const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat const& d) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat const& d); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& real); - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& orientation, vec<3, T, Q> const& translation); - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& real, qua const& dual); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT tdualquat(tdualquat const& q); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<2, 4, T, Q> const& holder_mat); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<3, 4, T, Q> const& aug_mat); - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL tdualquat & operator=(tdualquat const& m) GLM_DEFAULT; - - template - GLM_FUNC_DECL tdualquat & operator=(tdualquat const& m); - template - GLM_FUNC_DECL tdualquat & operator*=(U s); - template - GLM_FUNC_DECL tdualquat & operator/=(U s); - }; - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL tdualquat operator+(tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator-(tdualquat const& q); - - // -- Binary operators -- - - template - GLM_FUNC_DECL tdualquat operator+(tdualquat const& q, tdualquat const& p); - - template - GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, tdualquat const& p); - - template - GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL 
vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q); - - template - GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, T const& s); - - template - GLM_FUNC_DECL tdualquat operator*(T const& s, tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator/(tdualquat const& q, T const& s); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(tdualquat const& q1, tdualquat const& q2); - - template - GLM_FUNC_DECL bool operator!=(tdualquat const& q1, tdualquat const& q2); - - /// Creates an identity dual quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dual_quat_identity(); - - /// Returns the normalized quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat normalize(tdualquat const& q); - - /// Returns the linear interpolation of two dual quaternion. - /// - /// @see gtc_dual_quaternion - template - GLM_FUNC_DECL tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a); - - /// Returns the q inverse. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat inverse(tdualquat const& q); - - /// Converts a quaternion to a 2 * 4 matrix. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x); - - /// Converts a quaternion to a 3 * 4 matrix. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x); - - /// Converts a 2 * 4 matrix (matrix which holds real and dual parts) to a quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dualquat_cast(mat<2, 4, T, Q> const& x); - - /// Converts a 3 * 4 matrix (augmented matrix rotation + translation) to a quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dualquat_cast(mat<3, 4, T, Q> const& x); - - - /// Dual-quaternion of low single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_dualquat; - - /// Dual-quaternion of medium single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_dualquat; - - /// Dual-quaternion of high single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_dualquat; - - - /// Dual-quaternion of low single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_fdualquat; - - /// Dual-quaternion of medium single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_fdualquat; - - /// Dual-quaternion of high single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_fdualquat; - - - /// Dual-quaternion of low double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_ddualquat; - - /// Dual-quaternion of medium double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_ddualquat; - - /// Dual-quaternion of high double-qualifier floating-point numbers. 
- /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_ddualquat; - - -#if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - /// Dual-quaternion of floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_fdualquat dualquat; - - /// Dual-quaternion of single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_fdualquat fdualquat; -#elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - typedef highp_fdualquat dualquat; - typedef highp_fdualquat fdualquat; -#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - typedef mediump_fdualquat dualquat; - typedef mediump_fdualquat fdualquat; -#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT)) - typedef lowp_fdualquat dualquat; - typedef lowp_fdualquat fdualquat; -#else -# error "GLM error: multiple default precision requested for single-precision floating-point types" -#endif - - -#if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - /// Dual-quaternion of default double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_ddualquat ddualquat; -#elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef highp_ddualquat ddualquat; -#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef mediump_ddualquat ddualquat; -#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef lowp_ddualquat ddualquat; -#else -# error "GLM error: Multiple default precision requested for double-precision floating-point types" -#endif - - /// @} -} //namespace glm - -#include "dual_quaternion.inl" diff --git a/third_party/glm/gtx/dual_quaternion.inl b/third_party/glm/gtx/dual_quaternion.inl deleted file mode 100755 index fad07ea..0000000 --- a/third_party/glm/gtx/dual_quaternion.inl +++ /dev/null @@ -1,352 +0,0 @@ -/// @ref gtx_dual_quaternion - -#include "../geometric.hpp" -#include - -namespace glm -{ - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER typename tdualquat::part_type & tdualquat::operator[](typename tdualquat::length_type i) - { - assert(i >= 0 && i < this->length()); - return (&real)[i]; - } - - template - GLM_FUNC_QUALIFIER typename tdualquat::part_type const& tdualquat::operator[](typename tdualquat::length_type i) const - { - assert(i >= 0 && i < this->length()); - return (&real)[i]; - } - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat() -# if GLM_CONFIG_DEFAULTED_FUNCTIONS != GLM_DISABLE - : real(qua()) - , dual(qua(0, 0, 0, 0)) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) - : real(d.real) - , dual(d.dual) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) - : real(d.real) - , dual(d.dual) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r) - : real(r), dual(qua(0, 0, 0, 0)) - 
{} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& q, vec<3, T, Q> const& p) - : real(q), dual( - T(-0.5) * ( p.x*q.x + p.y*q.y + p.z*q.z), - T(+0.5) * ( p.x*q.w + p.y*q.z - p.z*q.y), - T(+0.5) * (-p.x*q.z + p.y*q.w + p.z*q.x), - T(+0.5) * ( p.x*q.y - p.y*q.x + p.z*q.w)) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r, qua const& d) - : real(r), dual(d) - {} - - // -- Conversion constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& q) - : real(q.real) - , dual(q.dual) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<2, 4, T, Q> const& m) - { - *this = dualquat_cast(m); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<3, 4, T, Q> const& m) - { - *this = dualquat_cast(m); - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) - { - this->real = q.real; - this->dual = q.dual; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) - { - this->real = q.real; - this->dual = q.dual; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator*=(U s) - { - this->real *= static_cast(s); - this->dual *= static_cast(s); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator/=(U s) - { - this->real /= static_cast(s); - this->dual /= static_cast(s); - return *this; - } - - // -- Unary bit operators -- - - template - GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q) - { - return q; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator-(tdualquat const& q) - { - return tdualquat(-q.real, -q.dual); - } - - // -- Binary operators -- - - template - GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q, tdualquat const& p) - { - return tdualquat(q.real + p.real,q.dual + p.dual); - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& p, tdualquat const& o) - { - return tdualquat(p.real * o.real,p.real * o.dual + p.dual * o.real); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v) - { - vec<3, T, Q> const real_v3(q.real.x,q.real.y,q.real.z); - vec<3, T, Q> const dual_v3(q.dual.x,q.dual.y,q.dual.z); - return (cross(real_v3, cross(real_v3,v) + v * q.real.w + dual_v3) + dual_v3 * q.real.w - real_v3 * q.dual.w) * T(2) + v; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q) - { - return glm::inverse(q) * v; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q) - { - return glm::inverse(q) * v; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& q, T const& s) - { - return tdualquat(q.real * s, q.dual * s); - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(T const& s, tdualquat const& q) - { - return q * s; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator/(tdualquat const& q, T const& s) - { - return tdualquat(q.real / s, q.dual / s); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(tdualquat const& q1, tdualquat const& q2) - { - return 
(q1.real == q2.real) && (q1.dual == q2.dual); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(tdualquat const& q1, tdualquat const& q2) - { - return (q1.real != q2.real) || (q1.dual != q2.dual); - } - - // -- Operations -- - - template - GLM_FUNC_QUALIFIER tdualquat dual_quat_identity() - { - return tdualquat( - qua(static_cast(1), static_cast(0), static_cast(0), static_cast(0)), - qua(static_cast(0), static_cast(0), static_cast(0), static_cast(0))); - } - - template - GLM_FUNC_QUALIFIER tdualquat normalize(tdualquat const& q) - { - return q / length(q.real); - } - - template - GLM_FUNC_QUALIFIER tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a) - { - // Dual Quaternion Linear blend aka DLB: - // Lerp is only defined in [0, 1] - assert(a >= static_cast(0)); - assert(a <= static_cast(1)); - T const k = dot(x.real,y.real) < static_cast(0) ? -a : a; - T const one(1); - return tdualquat(x * (one - a) + y * k); - } - - template - GLM_FUNC_QUALIFIER tdualquat inverse(tdualquat const& q) - { - const glm::qua real = conjugate(q.real); - const glm::qua dual = conjugate(q.dual); - return tdualquat(real, dual + (real * (-2.0f * dot(real,dual)))); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x) - { - return mat<2, 4, T, Q>( x[0].x, x[0].y, x[0].z, x[0].w, x[1].x, x[1].y, x[1].z, x[1].w ); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x) - { - qua r = x.real / length2(x.real); - - qua const rr(r.w * x.real.w, r.x * x.real.x, r.y * x.real.y, r.z * x.real.z); - r *= static_cast(2); - - T const xy = r.x * x.real.y; - T const xz = r.x * x.real.z; - T const yz = r.y * x.real.z; - T const wx = r.w * x.real.x; - T const wy = r.w * x.real.y; - T const wz = r.w * x.real.z; - - vec<4, T, Q> const a( - rr.w + rr.x - rr.y - rr.z, - xy - wz, - xz + wy, - -(x.dual.w * r.x - x.dual.x * r.w + x.dual.y * r.z - x.dual.z * r.y)); - - vec<4, T, Q> const b( - xy + wz, - rr.w + rr.y - rr.x - rr.z, - yz - wx, - -(x.dual.w * r.y - x.dual.x * r.z - x.dual.y * r.w + x.dual.z * r.x)); - - vec<4, T, Q> const c( - xz - wy, - yz + wx, - rr.w + rr.z - rr.x - rr.y, - -(x.dual.w * r.z + x.dual.x * r.y - x.dual.y * r.x - x.dual.z * r.w)); - - return mat<3, 4, T, Q>(a, b, c); - } - - template - GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<2, 4, T, Q> const& x) - { - return tdualquat( - qua( x[0].w, x[0].x, x[0].y, x[0].z ), - qua( x[1].w, x[1].x, x[1].y, x[1].z )); - } - - template - GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<3, 4, T, Q> const& x) - { - qua real; - - T const trace = x[0].x + x[1].y + x[2].z; - if(trace > static_cast(0)) - { - T const r = sqrt(T(1) + trace); - T const invr = static_cast(0.5) / r; - real.w = static_cast(0.5) * r; - real.x = (x[2].y - x[1].z) * invr; - real.y = (x[0].z - x[2].x) * invr; - real.z = (x[1].x - x[0].y) * invr; - } - else if(x[0].x > x[1].y && x[0].x > x[2].z) - { - T const r = sqrt(T(1) + x[0].x - x[1].y - x[2].z); - T const invr = static_cast(0.5) / r; - real.x = static_cast(0.5)*r; - real.y = (x[1].x + x[0].y) * invr; - real.z = (x[0].z + x[2].x) * invr; - real.w = (x[2].y - x[1].z) * invr; - } - else if(x[1].y > x[2].z) - { - T const r = sqrt(T(1) + x[1].y - x[0].x - x[2].z); - T const invr = static_cast(0.5) / r; - real.x = (x[1].x + x[0].y) * invr; - real.y = static_cast(0.5) * r; - real.z = (x[2].y + x[1].z) * invr; - real.w = (x[0].z - x[2].x) * invr; - } - else - { - T const r = sqrt(T(1) + x[2].z - x[0].x - x[1].y); - T const invr = static_cast(0.5) / r; - real.x = (x[0].z + x[2].x) 
* invr; - real.y = (x[2].y + x[1].z) * invr; - real.z = static_cast(0.5) * r; - real.w = (x[1].x - x[0].y) * invr; - } - - qua dual; - dual.x = static_cast(0.5) * ( x[0].w * real.w + x[1].w * real.z - x[2].w * real.y); - dual.y = static_cast(0.5) * (-x[0].w * real.z + x[1].w * real.w + x[2].w * real.x); - dual.z = static_cast(0.5) * ( x[0].w * real.y - x[1].w * real.x + x[2].w * real.w); - dual.w = -static_cast(0.5) * ( x[0].w * real.x + x[1].w * real.y + x[2].w * real.z); - return tdualquat(real, dual); - } -}//namespace glm diff --git a/third_party/glm/gtx/easing.hpp b/third_party/glm/gtx/easing.hpp deleted file mode 100755 index 57f3d61..0000000 --- a/third_party/glm/gtx/easing.hpp +++ /dev/null @@ -1,219 +0,0 @@ -/// @ref gtx_easing -/// @file glm/gtx/easing.hpp -/// @author Robert Chisholm -/// -/// @see core (dependence) -/// -/// @defgroup gtx_easing GLM_GTX_easing -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Easing functions for animations and transitons -/// All functions take a parameter x in the range [0.0,1.0] -/// -/// Based on the AHEasing project of Warren Moore (https://github.com/warrenm/AHEasing) - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_easing extension included") -# endif -#endif - -namespace glm{ - /// @addtogroup gtx_easing - /// @{ - - /// Modelled after the line y = x - /// @see gtx_easing - template - GLM_FUNC_DECL genType linearInterpolation(genType const & a); - - /// Modelled after the parabola y = x^2 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseIn(genType const & a); - - /// Modelled after the parabola y = -x^2 + 2x - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseOut(genType const & a); - - /// Modelled after the piecewise quadratic - /// y = (1/2)((2x)^2) ; [0, 0.5) - /// y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a); - - /// Modelled after the cubic y = x^3 - template - GLM_FUNC_DECL genType cubicEaseIn(genType const & a); - - /// Modelled after the cubic y = (x - 1)^3 + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType cubicEaseOut(genType const & a); - - /// Modelled after the piecewise cubic - /// y = (1/2)((2x)^3) ; [0, 0.5) - /// y = (1/2)((2x-2)^3 + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType cubicEaseInOut(genType const & a); - - /// Modelled after the quartic x^4 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseIn(genType const & a); - - /// Modelled after the quartic y = 1 - (x - 1)^4 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseOut(genType const & a); - - /// Modelled after the piecewise quartic - /// y = (1/2)((2x)^4) ; [0, 0.5) - /// y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseInOut(genType const & a); - - /// Modelled after the quintic y = x^5 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quinticEaseIn(genType const & a); - - /// Modelled after the quintic y = (x - 1)^5 + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL 
genType quinticEaseOut(genType const & a); - - /// Modelled after the piecewise quintic - /// y = (1/2)((2x)^5) ; [0, 0.5) - /// y = (1/2)((2x-2)^5 + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quinticEaseInOut(genType const & a); - - /// Modelled after quarter-cycle of sine wave - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseIn(genType const & a); - - /// Modelled after quarter-cycle of sine wave (different phase) - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseOut(genType const & a); - - /// Modelled after half sine wave - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseInOut(genType const & a); - - /// Modelled after shifted quadrant IV of unit circle - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseIn(genType const & a); - - /// Modelled after shifted quadrant II of unit circle - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseOut(genType const & a); - - /// Modelled after the piecewise circular function - /// y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) - /// y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseInOut(genType const & a); - - /// Modelled after the exponential function y = 2^(10(x - 1)) - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseIn(genType const & a); - - /// Modelled after the exponential function y = -2^(-10x) + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseOut(genType const & a); - - /// Modelled after the piecewise exponential - /// y = (1/2)2^(10(2x - 1)) ; [0,0.5) - /// y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a); - - /// Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1)) - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseIn(genType const & a); - - /// Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseOut(genType const & a); - - /// Modelled after the piecewise exponentially-damped sine wave: - /// y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) - /// y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseInOut(genType const & a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseIn(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseOut(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseInOut(genType const& a); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseIn(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseOut(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseInOut(genType const& a); - - /// @} 
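All of the GLM_GTX_easing functions declared above share one contract: the input is a normalized time t in [0, 1] (asserted in debug builds, per easing.inl below) and the return value is the eased progress. A small table-printing sketch, same assumptions as before:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/gtx/easing.hpp>
    #include <cstdio>

    int main()
    {
        // 0.25f is exactly representable, so t hits 0, 0.25, 0.5, 0.75, 1 exactly
        // and never violates the [0, 1] asserts.
        for (float t = 0.0f; t <= 1.0f; t += 0.25f)
        {
            std::printf("t=%.2f quad=%.3f elastic=%.3f bounce=%.3f back=%.3f\n",
                t,
                glm::quadraticEaseInOut(t),
                glm::elasticEaseOut(t),
                glm::bounceEaseOut(t),
                glm::backEaseIn(t, 1.70158f)); // explicit overshoot; the one-argument overload defaults to 1.70158
        }
        return 0;
    }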
-}//namespace glm - -#include "easing.inl" diff --git a/third_party/glm/gtx/easing.inl b/third_party/glm/gtx/easing.inl deleted file mode 100755 index 4b7d05b..0000000 --- a/third_party/glm/gtx/easing.inl +++ /dev/null @@ -1,436 +0,0 @@ -/// @ref gtx_easing - -#include - -namespace glm{ - - template - GLM_FUNC_QUALIFIER genType linearInterpolation(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a; - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a; - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return -(a * (a - static_cast(2))); - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(2) * a * a; - } - else - { - return (-static_cast(2) * a * a) + (4 * a) - one(); - } - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = a - one(); - return f * f * f + one(); - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if (a < static_cast(0.5)) - { - return static_cast(4) * a * a * a; - } - else - { - genType const f = ((static_cast(2) * a) - static_cast(2)); - return static_cast(0.5) * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = (a - one()); - return f * f * f * (one() - a) + one(); - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(8) * a * a * a * a; - } - else - { - genType const f = (a - one()); - return -static_cast(8) * f * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = (a - one()); - return f * f * f * f * f + one(); - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(16) * a * a * a * a * a; - } - else - { - genType const f = ((static_cast(2) * a) - static_cast(2)); - return static_cast(0.5) * f * f * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType sineEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= 
one()); - - return sin((a - one()) * half_pi()) + one(); - } - - template - GLM_FUNC_QUALIFIER genType sineEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return sin(a * half_pi()); - } - - template - GLM_FUNC_QUALIFIER genType sineEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return static_cast(0.5) * (one() - cos(a * pi())); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return one() - sqrt(one() - (a * a)); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return sqrt((static_cast(2) - a) * a); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(0.5) * (one() - std::sqrt(one() - static_cast(4) * (a * a))); - } - else - { - return static_cast(0.5) * (std::sqrt(-((static_cast(2) * a) - static_cast(3)) * ((static_cast(2) * a) - one())) + one()); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a <= zero()) - return a; - else - { - genType const Complementary = a - one(); - genType const Two = static_cast(2); - - return glm::pow(Two, Complementary * static_cast(10)); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a >= one()) - return a; - else - { - return one() - glm::pow(static_cast(2), -static_cast(10) * a); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - return static_cast(0.5) * glm::pow(static_cast(2), (static_cast(20) * a) - static_cast(10)); - else - return -static_cast(0.5) * glm::pow(static_cast(2), (-static_cast(20) * a) + static_cast(10)) + one(); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return std::sin(static_cast(13) * half_pi() * a) * glm::pow(static_cast(2), static_cast(10) * (a - one())); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return std::sin(-static_cast(13) * half_pi() * (a + one())) * glm::pow(static_cast(2), -static_cast(10) * a) + one(); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - return static_cast(0.5) * std::sin(static_cast(13) * half_pi() * (static_cast(2) * a)) * glm::pow(static_cast(2), static_cast(10) * ((static_cast(2) * a) - one())); - else - return static_cast(0.5) * (std::sin(-static_cast(13) * half_pi() * ((static_cast(2) * a - one()) + one())) * glm::pow(static_cast(2), -static_cast(10) * (static_cast(2) * a - one())) + static_cast(2)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= 
one()); - - genType z = ((o + one()) * a) - o; - return (a * a * z); - } - - template - GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType n = a - one(); - genType z = ((o + one()) * n) + o; - return (n * n * z) + one(); - } - - template - GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType s = o * static_cast(1.525); - genType x = static_cast(0.5); - genType n = a / static_cast(0.5); - - if (n < static_cast(1)) - { - genType z = ((s + static_cast(1)) * n) - s; - genType m = n * n * z; - return x * m; - } - else - { - n -= static_cast(2); - genType z = ((s + static_cast(1)) * n) + s; - genType m = (n*n*z) + static_cast(2); - return x * m; - } - } - - template - GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a) - { - return backEaseIn(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a) - { - return backEaseOut(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a) - { - return backEaseInOut(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(4.0 / 11.0)) - { - return (static_cast(121) * a * a) / static_cast(16); - } - else if(a < static_cast(8.0 / 11.0)) - { - return (static_cast(363.0 / 40.0) * a * a) - (static_cast(99.0 / 10.0) * a) + static_cast(17.0 / 5.0); - } - else if(a < static_cast(9.0 / 10.0)) - { - return (static_cast(4356.0 / 361.0) * a * a) - (static_cast(35442.0 / 1805.0) * a) + static_cast(16061.0 / 1805.0); - } - else - { - return (static_cast(54.0 / 5.0) * a * a) - (static_cast(513.0 / 25.0) * a) + static_cast(268.0 / 25.0); - } - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return one() - bounceEaseOut(one() - a); - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(0.5) * (one() - bounceEaseOut(a * static_cast(2))); - } - else - { - return static_cast(0.5) * bounceEaseOut(a * static_cast(2) - one()) + static_cast(0.5); - } - } - -}//namespace glm diff --git a/third_party/glm/gtx/euler_angles.hpp b/third_party/glm/gtx/euler_angles.hpp deleted file mode 100755 index 2723697..0000000 --- a/third_party/glm/gtx/euler_angles.hpp +++ /dev/null @@ -1,335 +0,0 @@ -/// @ref gtx_euler_angles -/// @file glm/gtx/euler_angles.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_euler_angles GLM_GTX_euler_angles -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build matrices from Euler angles. -/// -/// Extraction of Euler angles from rotation matrix. -/// Based on the original paper 2014 Mike Day - Extracting Euler Angles from a Rotation Matrix. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_euler_angles extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_euler_angles - /// @{ - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX( - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY( - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ( - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX( - T const & angleX, T const & angularVelocityX); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY( - T const & angleY, T const & angularVelocityY); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ( - T const & angleZ, T const & angularVelocityZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY( - T const& angleX, - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX( - T const& angleY, - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ( - T const& angleX, - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX( - T const& angle, - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ( - T const& angleY, - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY( - T const& angleZ, - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ( - T const& t1, - T const& t2, - T const& t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ( - T const& yaw, - T const& pitch, - T const& roll); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X). 
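The single-axis builders above compose in the order their names spell out, so the fused two-angle overloads are just precomputed products. A minimal sketch (same assumptions; the angles are illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtc/epsilon.hpp>
    #include <glm/gtx/euler_angles.hpp>

    int main()
    {
        float const rx = glm::radians(30.0f);
        float const ry = glm::radians(45.0f);

        glm::mat4 const Rx  = glm::eulerAngleX(rx);      // rotation about X
        glm::mat4 const Ry  = glm::eulerAngleY(ry);      // rotation about Y
        glm::mat4 const Rxy = glm::eulerAngleXY(rx, ry); // fused X * Y

        // The fused builder matches the explicit product up to rounding.
        glm::vec4 const p(1.0f, 2.0f, 3.0f, 1.0f);
        glm::vec4 const a = Rxy * p;
        glm::vec4 const b = Rx * (Ry * p);
        return glm::all(glm::epsilonEqual(a, b, 1e-5f)) ? 0 : 1;
    }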
- /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll( - T const& yaw, - T const& pitch, - T const& roll); - - /// Creates a 2D 2 * 2 rotation matrix from an euler angle. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle); - - /// Creates a 2D 4 * 4 homogeneous rotation matrix from an euler angle. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle); - - /// Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). 
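yawPitchRoll above builds the (Y * X * Z) rotation, and the extractEulerAngleYXZ helper declared just below recovers the three angles from such a matrix, giving a simple round trip. A minimal sketch under the same assumptions:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/euler_angles.hpp>
    #include <cstdio>

    int main()
    {
        float const yaw   = glm::radians(10.0f);
        float const pitch = glm::radians(20.0f);
        float const roll  = glm::radians(30.0f);

        glm::mat4 const M = glm::yawPitchRoll(yaw, pitch, roll); // Y * X * Z

        float t1 = 0.0f, t2 = 0.0f, t3 = 0.0f;
        glm::extractEulerAngleYXZ(M, t1, t2, t3); // round-trips the inputs

        std::printf("%.1f %.1f %.1f\n",
            glm::degrees(t1), glm::degrees(t2), glm::degrees(t3)); // ~10.0 20.0 30.0
        return 0;
    }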
-	/// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
-	/// @see gtx_euler_angles
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles);
-
-	/// Extracts the (X * Y * Z) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Y * X * Z) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (X * Z * X) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (X * Y * X) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Y * X * Y) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Y * Z * Y) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Z * Y * Z) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Z * X * Z) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (X * Z * Y) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Y * Z * X) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Z * Y * X) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// Extracts the (Z * X * Y) Euler angles from the rotation matrix M
-	/// @see gtx_euler_angles
-	template<typename T>
-	GLM_FUNC_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M,
-		T & t1,
-		T & t2,
-		T & t3);
-
-	/// @}
-}//namespace glm
-
-#include "euler_angles.inl"
diff --git a/third_party/glm/gtx/euler_angles.inl b/third_party/glm/gtx/euler_angles.inl
deleted file mode 100755
index 68c5012..0000000
--- a/third_party/glm/gtx/euler_angles.inl
+++ /dev/null
@@ -1,899 +0,0 @@
-/// @ref gtx_euler_angles
-
-#include "compatibility.hpp" // glm::atan2
-
-namespace glm
-{
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleX
-	(
-		T const& angleX
-	)
-	{
-		T cosX = glm::cos(angleX);
-		T sinX = glm::sin(angleX);
-
-		return mat<4, 4, T, defaultp>(
-			T(1), T(0), T(0), T(0),
-			T(0), cosX, sinX, T(0),
-			T(0),-sinX, cosX, T(0),
-			T(0), T(0), T(0), T(1));
-	}
-
-	template<typename T>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleY
-	(
-		T const& angleY
-	)
-	{
-		T cosY = glm::cos(angleY);
-		T sinY = 
glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, T(0), -sinY, T(0), - T(0), T(1), T(0), T(0), - sinY, T(0), cosY, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZ - ( - T const& angleZ - ) - { - T cosZ = glm::cos(angleZ); - T sinZ = glm::sin(angleZ); - - return mat<4, 4, T, defaultp>( - cosZ, sinZ, T(0), T(0), - -sinZ, cosZ, T(0), T(0), - T(0), T(0), T(1), T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleX - ( - T const & angleX, - T const & angularVelocityX - ) - { - T cosX = glm::cos(angleX) * angularVelocityX; - T sinX = glm::sin(angleX) * angularVelocityX; - - return mat<4, 4, T, defaultp>( - T(0), T(0), T(0), T(0), - T(0),-sinX, cosX, T(0), - T(0),-cosX,-sinX, T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleY - ( - T const & angleY, - T const & angularVelocityY - ) - { - T cosY = glm::cos(angleY) * angularVelocityY; - T sinY = glm::sin(angleY) * angularVelocityY; - - return mat<4, 4, T, defaultp>( - -sinY, T(0), -cosY, T(0), - T(0), T(0), T(0), T(0), - cosY, T(0), -sinY, T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleZ - ( - T const & angleZ, - T const & angularVelocityZ - ) - { - T cosZ = glm::cos(angleZ) * angularVelocityZ; - T sinZ = glm::sin(angleZ) * angularVelocityZ; - - return mat<4, 4, T, defaultp>( - -sinZ, cosZ, T(0), T(0), - -cosZ, -sinZ, T(0), T(0), - T(0), T(0), T(0), T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXY - ( - T const& angleX, - T const& angleY - ) - { - T cosX = glm::cos(angleX); - T sinX = glm::sin(angleX); - T cosY = glm::cos(angleY); - T sinY = glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, -sinX * -sinY, cosX * -sinY, T(0), - T(0), cosX, sinX, T(0), - sinY, -sinX * cosY, cosX * cosY, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYX - ( - T const& angleY, - T const& angleX - ) - { - T cosX = glm::cos(angleX); - T sinX = glm::sin(angleX); - T cosY = glm::cos(angleY); - T sinY = glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, 0, -sinY, T(0), - sinY * sinX, cosX, cosY * sinX, T(0), - sinY * cosX, -sinX, cosY * cosX, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZ - ( - T const& angleX, - T const& angleZ - ) - { - return eulerAngleX(angleX) * eulerAngleZ(angleZ); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZX - ( - T const& angleZ, - T const& angleX - ) - { - return eulerAngleZ(angleZ) * eulerAngleX(angleX); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZ - ( - T const& angleY, - T const& angleZ - ) - { - return eulerAngleY(angleY) * eulerAngleZ(angleZ); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZY - ( - T const& angleZ, - T const& angleY - ) - { - return eulerAngleZ(angleZ) * eulerAngleY(angleY); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ - ( - T const& t1, - T const& t2, - T const& t3 - ) - { - T c1 = glm::cos(-t1); - T c2 = glm::cos(-t2); - T c3 = glm::cos(-t3); - T s1 = glm::sin(-t1); - T s2 = glm::sin(-t2); - T s3 = glm::sin(-t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2 * c3; - Result[0][1] =-c1 * s3 + s1 * s2 * c3; - Result[0][2] = s1 * s3 + c1 * s2 * c3; - Result[0][3] = 
static_cast(0); - Result[1][0] = c2 * s3; - Result[1][1] = c1 * c3 + s1 * s2 * s3; - Result[1][2] =-s1 * c3 + c1 * s2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] =-s2; - Result[2][1] = s1 * c2; - Result[2][2] = c1 * c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXZ - ( - T const& yaw, - T const& pitch, - T const& roll - ) - { - T tmp_ch = glm::cos(yaw); - T tmp_sh = glm::sin(yaw); - T tmp_cp = glm::cos(pitch); - T tmp_sp = glm::sin(pitch); - T tmp_cb = glm::cos(roll); - T tmp_sb = glm::sin(roll); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; - Result[0][1] = tmp_sb * tmp_cp; - Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; - Result[0][3] = static_cast(0); - Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; - Result[1][1] = tmp_cb * tmp_cp; - Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; - Result[1][3] = static_cast(0); - Result[2][0] = tmp_sh * tmp_cp; - Result[2][1] = -tmp_sp; - Result[2][2] = tmp_ch * tmp_cp; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2; - Result[0][1] = c1 * s2; - Result[0][2] = s1 * s2; - Result[0][3] = static_cast(0); - Result[1][0] =-c3 * s2; - Result[1][1] = c1 * c2 * c3 - s1 * s3; - Result[1][2] = c1 * s3 + c2 * c3 * s1; - Result[1][3] = static_cast(0); - Result[2][0] = s2 * s3; - Result[2][1] =-c3 * s1 - c1 * c2 * s3; - Result[2][2] = c1 * c3 - c2 * s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2; - Result[0][1] = s1 * s2; - Result[0][2] =-c1 * s2; - Result[0][3] = static_cast(0); - Result[1][0] = s2 * s3; - Result[1][1] = c1 * c3 - c2 * s1 * s3; - Result[1][2] = c3 * s1 + c1 * c2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s2; - Result[2][1] =-c1 * s3 - c2 * c3 * s1; - Result[2][2] = c1 * c2 * c3 - s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - c2 * s1 * s3; - Result[0][1] = s2* s3; - Result[0][2] =-c3 * s1 - c1 * c2 * s3; - Result[0][3] = static_cast(0); - 
Result[1][0] = s1 * s2; - Result[1][1] = c2; - Result[1][2] = c1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s3 + c2 * c3 * s1; - Result[2][1] =-c3 * s2; - Result[2][2] = c1 * c2 * c3 - s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2 * c3 - s1 * s3; - Result[0][1] = c3 * s2; - Result[0][2] =-c1 * s3 - c2 * c3 * s1; - Result[0][3] = static_cast(0); - Result[1][0] =-c1 * s2; - Result[1][1] = c2; - Result[1][2] = s1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s1 + c1 * c2 * s3; - Result[2][1] = s2 * s3; - Result[2][2] = c1 * c3 - c2 * s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYZ - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2 * c3 - s1 * s3; - Result[0][1] = c1 * s3 + c2 * c3 * s1; - Result[0][2] =-c3 * s2; - Result[0][3] = static_cast(0); - Result[1][0] =-c3 * s1 - c1 * c2 * s3; - Result[1][1] = c1 * c3 - c2 * s1 * s3; - Result[1][2] = s2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s2; - Result[2][1] = s1 * s2; - Result[2][2] = c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXZ - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - c2 * s1 * s3; - Result[0][1] = c3 * s1 + c1 * c2 * s3; - Result[0][2] = s2 *s3; - Result[0][3] = static_cast(0); - Result[1][0] =-c1 * s3 - c2 * c3 * s1; - Result[1][1] = c1 * c2 * c3 - s1 * s3; - Result[1][2] = c3 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = s1 * s2; - Result[2][1] =-c1 * s2; - Result[2][2] = c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2 * c3; - Result[0][1] = s1 * s3 + c1 * c3 * s2; - Result[0][2] = c3 * s1 * s2 - c1 * s3; - Result[0][3] = static_cast(0); - Result[1][0] =-s2; - Result[1][1] = c1 * c2; - Result[1][2] = c2 * s1; - Result[1][3] = static_cast(0); - Result[2][0] = c2 * s3; - Result[2][1] = c1 * s2 * s3 - c3 * s1; - 
Result[2][2] = c1 * c3 + s1 * s2 *s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2; - Result[0][1] = s2; - Result[0][2] =-c2 * s1; - Result[0][3] = static_cast(0); - Result[1][0] = s1 * s3 - c1 * c3 * s2; - Result[1][1] = c2 * c3; - Result[1][2] = c1 * s3 + c3 * s1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s1 + c1 * s2 * s3; - Result[2][1] =-c2 * s3; - Result[2][2] = c1 * c3 - s1 * s2 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2; - Result[0][1] = c2 * s1; - Result[0][2] =-s2; - Result[0][3] = static_cast(0); - Result[1][0] = c1 * s2 * s3 - c3 * s1; - Result[1][1] = c1 * c3 + s1 * s2 * s3; - Result[1][2] = c2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = s1 * s3 + c1 * c3 * s2; - Result[2][1] = c3 * s1 * s2 - c1 * s3; - Result[2][2] = c2 * c3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - s1 * s2 * s3; - Result[0][1] = c3 * s1 + c1 * s2 * s3; - Result[0][2] =-c2 * s3; - Result[0][3] = static_cast(0); - Result[1][0] =-c2 * s1; - Result[1][1] = c1 * c2; - Result[1][2] = s2; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s3 + c3 * s1 * s2; - Result[2][1] = s1 * s3 - c1 * c3 * s2; - Result[2][2] = c2 * c3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll - ( - T const& yaw, - T const& pitch, - T const& roll - ) - { - T tmp_ch = glm::cos(yaw); - T tmp_sh = glm::sin(yaw); - T tmp_cp = glm::cos(pitch); - T tmp_sp = glm::sin(pitch); - T tmp_cb = glm::cos(roll); - T tmp_sb = glm::sin(roll); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; - Result[0][1] = tmp_sb * tmp_cp; - Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; - Result[0][3] = static_cast(0); - Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; - Result[1][1] = tmp_cb * tmp_cp; - Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; - Result[1][3] = static_cast(0); - Result[2][0] = tmp_sh * tmp_cp; - Result[2][1] = -tmp_sp; - Result[2][2] = 
tmp_ch * tmp_cp; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> orientate2 - ( - T const& angle - ) - { - T c = glm::cos(angle); - T s = glm::sin(angle); - - mat<2, 2, T, defaultp> Result; - Result[0][0] = c; - Result[0][1] = s; - Result[1][0] = -s; - Result[1][1] = c; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> orientate3 - ( - T const& angle - ) - { - T c = glm::cos(angle); - T s = glm::sin(angle); - - mat<3, 3, T, defaultp> Result; - Result[0][0] = c; - Result[0][1] = s; - Result[0][2] = 0.0f; - Result[1][0] = -s; - Result[1][1] = c; - Result[1][2] = 0.0f; - Result[2][0] = 0.0f; - Result[2][1] = 0.0f; - Result[2][2] = 1.0f; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orientate3 - ( - vec<3, T, Q> const& angles - ) - { - return mat<3, 3, T, Q>(yawPitchRoll(angles.z, angles.x, angles.y)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientate4 - ( - vec<3, T, Q> const& angles - ) - { - return yawPitchRoll(angles.z, angles.x, angles.y); - } - - template - GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][1], M[2][2]); - T C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]); - T T2 = glm::atan2(-M[2][0], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]); - t1 = -T1; - t2 = -T2; - t3 = -T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][0], M[2][2]); - T C2 = glm::sqrt(M[0][1]*M[0][1] + M[1][1]*M[1][1]); - T T2 = glm::atan2(-M[2][1], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[1][2] - C1*M[1][0], C1*M[0][0] - S1*M[0][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][2], M[0][1]); - T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(S2, M[0][0]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[1][2] - S1*M[1][1], C1*M[2][2] - S1*M[2][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][1], -M[0][2]); - T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(S2, M[0][0]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(-C1*M[2][1] - S1*M[2][2], C1*M[1][1] + S1*M[1][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][0], M[1][2]); - T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(S2, M[1][1]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[2][0] - S1*M[2][2], C1*M[0][0] - S1*M[0][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][2], -M[1][0]); - T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(S2, M[1][1]); - T S1 = glm::sin(T1); - 
T C1 = glm::cos(T1); - T T3 = glm::atan2(-S1*M[0][0] - C1*M[0][2], S1*M[2][0] + C1*M[2][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][1], M[2][0]); - T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); - T T2 = glm::atan2(S2, M[2][2]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[0][1] - S1*M[0][0], C1*M[1][1] - S1*M[1][0]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][0], -M[2][1]); - T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); - T T2 = glm::atan2(S2, M[2][2]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(-C1*M[1][0] - S1*M[1][1], C1*M[0][0] + S1*M[0][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][2], M[1][1]); - T C2 = glm::sqrt(M[0][0]*M[0][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(-M[1][0], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[0][1] - C1*M[0][2], C1*M[2][2] - S1*M[2][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(-M[0][2], M[0][0]); - T C2 = glm::sqrt(M[1][1]*M[1][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(M[0][1], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[1][0] + C1*M[1][2], S1*M[2][0] + C1*M[2][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][1], M[0][0]); - T C2 = glm::sqrt(M[1][2]*M[1][2] + M[2][2]*M[2][2]); - T T2 = glm::atan2(-M[0][2], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[2][0] - C1*M[2][1], C1*M[1][1] - S1*M[1][0]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(-M[1][0], M[1][1]); - T C2 = glm::sqrt(M[0][2]*M[0][2] + M[2][2]*M[2][2]); - T T2 = glm::atan2(M[1][2], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[2][0] + S1*M[2][1], C1*M[0][0] + S1*M[0][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } -}//namespace glm diff --git a/third_party/glm/gtx/extend.hpp b/third_party/glm/gtx/extend.hpp deleted file mode 100755 index 28b7c5c..0000000 --- a/third_party/glm/gtx/extend.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/// @ref gtx_extend -/// @file glm/gtx/extend.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_extend GLM_GTX_extend -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extend a position from a source to a position at a defined length. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extend extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_extend - /// @{ - - /// Extends of Length the Origin position using the (Source - Origin) direction. - /// @see gtx_extend - template - GLM_FUNC_DECL genType extend( - genType const& Origin, - genType const& Source, - typename genType::value_type const Length); - - /// @} -}//namespace glm - -#include "extend.inl" diff --git a/third_party/glm/gtx/extend.inl b/third_party/glm/gtx/extend.inl deleted file mode 100755 index 32128eb..0000000 --- a/third_party/glm/gtx/extend.inl +++ /dev/null @@ -1,48 +0,0 @@ -/// @ref gtx_extend - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType extend - ( - genType const& Origin, - genType const& Source, - genType const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> extend - ( - vec<2, T, Q> const& Origin, - vec<2, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> extend - ( - vec<3, T, Q> const& Origin, - vec<3, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> extend - ( - vec<4, T, Q> const& Origin, - vec<4, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } -}//namespace glm diff --git a/third_party/glm/gtx/extended_min_max.hpp b/third_party/glm/gtx/extended_min_max.hpp deleted file mode 100755 index ad23a91..0000000 --- a/third_party/glm/gtx/extended_min_max.hpp +++ /dev/null @@ -1,182 +0,0 @@ -/// @ref gtx_extended_min_max -/// @file glm/gtx/extended_min_max.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_extended_min_max GLM_GTX_extented_min_max -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Min and max functions for 3 to 4 parameters. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extented_min_max is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extented_min_max extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_extended_min_max - /// @{ - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T min( - T const& x, - T const& y, - T const& z); - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - typename C::T const& y, - typename C::T const& z); - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - C const& y, - C const& z); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T min( - T const& x, - T const& y, - T const& z, - T const& w); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - C const& y, - C const& z, - C const& w); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T max( - T const& x, - T const& y, - T const& z); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - typename C::T const& y, - typename C::T const& z); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - C const& y, - C const& z); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T max( - T const& x, - T const& y, - T const& z, - T const& w); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - C const& y, - C const& z, - C const& w); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL genType fmin(genType x, genType y); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point; scalar or vector types. - /// - /// @see gtx_extented_min_max - /// @see std::fmax documentation - template - GLM_FUNC_DECL genType fmax(genType x, genType y); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point scalar or vector types. 
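// Sketch of the overloads declared above, assuming
// <glm/gtx/extended_min_max.hpp> is included: the 3- and 4-argument min/max
// fold the pairwise core functions, and the f-variants keep the non-NaN
// argument, so a NaN bound simply falls away.
#include <cmath>
#include <glm/gtx/extended_min_max.hpp>

float ExtendedMinMaxDemo()
{
	float lo = glm::min(3.0f, 1.0f, 2.0f);       // 1.0f
	float hi = glm::max(3.0f, 1.0f, 2.0f, 4.0f); // 4.0f
	// fmin(fmax(0.5f, NaN), 1.0f): the NaN lower bound is ignored.
	return glm::fclamp(0.5f, NAN, 1.0f) + lo + hi; // 0.5f + 1.0f + 4.0f
}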
- /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL vec fclamp(vec const& x, T minVal, T maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL vec fclamp(vec const& x, vec const& minVal, vec const& maxVal); - - - /// @} -}//namespace glm - -#include "extended_min_max.inl" diff --git a/third_party/glm/gtx/extended_min_max.inl b/third_party/glm/gtx/extended_min_max.inl deleted file mode 100755 index e72d1cc..0000000 --- a/third_party/glm/gtx/extended_min_max.inl +++ /dev/null @@ -1,218 +0,0 @@ -/// @ref gtx_extended_min_max - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T min( - T const& x, - T const& y, - T const& z) - { - return glm::min(glm::min(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - typename C::T const& y, - typename C::T const& z - ) - { - return glm::min(glm::min(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - C const& y, - C const& z - ) - { - return glm::min(glm::min(x, y), z); - } - - template - GLM_FUNC_QUALIFIER T min - ( - T const& x, - T const& y, - T const& z, - T const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - C const& y, - C const& z, - C const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template - GLM_FUNC_QUALIFIER T max( - T const& x, - T const& y, - T const& z) - { - return glm::max(glm::max(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - typename C::T const& y, - typename C::T const& z - ) - { - return glm::max(glm::max(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - C const& y, - C const& z - ) - { - return glm::max(glm::max(x, y), z); - } - - template - GLM_FUNC_QUALIFIER T max - ( - T const& x, - T const& y, - T const& z, - T const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - C const& y, - C const& z, - C const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - // fmin -# if GLM_HAS_CXX11_STL - using std::fmin; -# else - template - GLM_FUNC_QUALIFIER genType fmin(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point 
input"); - - if (isnan(x)) - return y; - if (isnan(y)) - return x; - - return min(x, y); - } -# endif - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, T b) - { - return detail::functor2::call(fmin, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b) - { - return detail::functor2::call(fmin, a, b); - } - - // fmax -# if GLM_HAS_CXX11_STL - using std::fmax; -# else - template - GLM_FUNC_QUALIFIER genType fmax(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(x)) - return y; - if (isnan(y)) - return x; - - return max(x, y); - } -# endif - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, T b) - { - return detail::functor2::call(fmax, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b) - { - return detail::functor2::call(fmax, a, b); - } - - // fclamp - template - GLM_FUNC_QUALIFIER genType fclamp(genType x, genType minVal, genType maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fclamp' only accept floating-point or integer inputs"); - return fmin(fmax(x, minVal), maxVal); - } - - template - GLM_FUNC_QUALIFIER vec fclamp(vec const& x, T minVal, T maxVal) - { - return fmin(fmax(x, vec(minVal)), vec(maxVal)); - } - - template - GLM_FUNC_QUALIFIER vec fclamp(vec const& x, vec const& minVal, vec const& maxVal) - { - return fmin(fmax(x, minVal), maxVal); - } -}//namespace glm diff --git a/third_party/glm/gtx/exterior_product.hpp b/third_party/glm/gtx/exterior_product.hpp deleted file mode 100755 index 5522df7..0000000 --- a/third_party/glm/gtx/exterior_product.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/// @ref gtx_exterior_product -/// @file glm/gtx/exterior_product.hpp -/// -/// @see core (dependence) -/// @see gtx_exterior_product (dependence) -/// -/// @defgroup gtx_exterior_product GLM_GTX_exterior_product -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// @brief Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_exterior_product extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_exterior_product - /// @{ - - /// Returns the cross product of x and y. 
- /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see Exterior product - template - GLM_FUNC_DECL T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u); - - /// @} -} //namespace glm - -#include "exterior_product.inl" diff --git a/third_party/glm/gtx/exterior_product.inl b/third_party/glm/gtx/exterior_product.inl deleted file mode 100755 index 93661fd..0000000 --- a/third_party/glm/gtx/exterior_product.inl +++ /dev/null @@ -1,26 +0,0 @@ -/// @ref gtx_exterior_product - -#include - -namespace glm { -namespace detail -{ - template - struct compute_cross_vec2 - { - GLM_FUNC_QUALIFIER static T call(vec<2, T, Q> const& v, vec<2, T, Q> const& u) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); - - return v.x * u.y - u.x * v.y; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER T cross(vec<2, T, Q> const& x, vec<2, T, Q> const& y) - { - return detail::compute_cross_vec2::value>::call(x, y); - } -}//namespace glm - diff --git a/third_party/glm/gtx/fast_exponential.hpp b/third_party/glm/gtx/fast_exponential.hpp deleted file mode 100755 index 6fb7286..0000000 --- a/third_party/glm/gtx/fast_exponential.hpp +++ /dev/null @@ -1,95 +0,0 @@ -/// @ref gtx_fast_exponential -/// @file glm/gtx/fast_exponential.hpp -/// -/// @see core (dependence) -/// @see gtx_half_float (dependence) -/// -/// @defgroup gtx_fast_exponential GLM_GTX_fast_exponential -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of exponential based functions. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_exponential extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_exponential - /// @{ - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL genType fastPow(genType x, genType y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastPow(vec const& x, vec const& y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastPow(vec const& x); - - /// Faster than the common exp function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastExp(T x); - - /// Faster than the common exp function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastExp(vec const& x); - - /// Faster than the common log function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastLog(T x); - - /// Faster than the common exp2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastLog(vec const& x); - - /// Faster than the common exp2 function but less accurate. 
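// Sketch of the declarations above, assuming
// <glm/gtx/fast_exponential.hpp> is included: fastExp is a short Taylor
// series, dependable only for x roughly in [-1, 1] per the note in the
// implementation below, while the integer overload of fastPow is an exact
// repeated multiply.
#include <glm/gtx/fast_exponential.hpp>

float FastExpDemo()
{
	float e = glm::fastExp(0.5f);     // close to std::exp(0.5f), ~1.6487f
	float p = glm::fastPow(2.0f, 10); // exactly 1024.0f
	return e + p;
}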
- /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastExp2(T x); - - /// Faster than the common exp2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastExp2(vec const& x); - - /// Faster than the common log2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastLog2(T x); - - /// Faster than the common log2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastLog2(vec const& x); - - /// @} -}//namespace glm - -#include "fast_exponential.inl" diff --git a/third_party/glm/gtx/fast_exponential.inl b/third_party/glm/gtx/fast_exponential.inl deleted file mode 100755 index f139e50..0000000 --- a/third_party/glm/gtx/fast_exponential.inl +++ /dev/null @@ -1,136 +0,0 @@ -/// @ref gtx_fast_exponential - -namespace glm -{ - // fastPow: - template - GLM_FUNC_QUALIFIER genType fastPow(genType x, genType y) - { - return exp(y * log(x)); - } - - template - GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) - { - return exp(y * log(x)); - } - - template - GLM_FUNC_QUALIFIER T fastPow(T x, int y) - { - T f = static_cast(1); - for(int i = 0; i < y; ++i) - f *= x; - return f; - } - - template - GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) - { - vec Result; - for(length_t i = 0, n = x.length(); i < n; ++i) - Result[i] = fastPow(x[i], y[i]); - return Result; - } - - // fastExp - // Note: This function provides accurate results only for value between -1 and 1, else avoid it. - template - GLM_FUNC_QUALIFIER T fastExp(T x) - { - // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. - // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); - T x2 = x * x; - T x3 = x2 * x; - T x4 = x3 * x; - T x5 = x4 * x; - return T(1) + x + (x2 * T(0.5)) + (x3 * T(0.1666666667)) + (x4 * T(0.041666667)) + (x5 * T(0.008333333333)); - } - /* // Try to handle all values of float... but often shower than std::exp, glm::floor and the loop kill the performance - GLM_FUNC_QUALIFIER float fastExp(float x) - { - const float e = 2.718281828f; - const float IntegerPart = floor(x); - const float FloatPart = x - IntegerPart; - float z = 1.f; - - for(int i = 0; i < int(IntegerPart); ++i) - z *= e; - - const float x2 = FloatPart * FloatPart; - const float x3 = x2 * FloatPart; - const float x4 = x3 * FloatPart; - const float x5 = x4 * FloatPart; - return z * (1.0f + FloatPart + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)); - } - - // Increase accuracy on number bigger that 1 and smaller than -1 but it's not enough for high and negative numbers - GLM_FUNC_QUALIFIER float fastExp(float x) - { - // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. 
- // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); - float x2 = x * x; - float x3 = x2 * x; - float x4 = x3 * x; - float x5 = x4 * x; - float x6 = x5 * x; - float x7 = x6 * x; - float x8 = x7 * x; - return 1.0f + x + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)+ (x6 * 0.00138888888888f) + (x7 * 0.000198412698f) + (x8 * 0.0000248015873f);; - } - */ - - template - GLM_FUNC_QUALIFIER vec fastExp(vec const& x) - { - return detail::functor1::call(fastExp, x); - } - - // fastLog - template - GLM_FUNC_QUALIFIER genType fastLog(genType x) - { - return std::log(x); - } - - /* Slower than the VC7.1 function... - GLM_FUNC_QUALIFIER float fastLog(float x) - { - float y1 = (x - 1.0f) / (x + 1.0f); - float y2 = y1 * y1; - return 2.0f * y1 * (1.0f + y2 * (0.3333333333f + y2 * (0.2f + y2 * 0.1428571429f))); - } - */ - - template - GLM_FUNC_QUALIFIER vec fastLog(vec const& x) - { - return detail::functor1::call(fastLog, x); - } - - //fastExp2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType fastExp2(genType x) - { - return fastExp(0.69314718055994530941723212145818f * x); - } - - template - GLM_FUNC_QUALIFIER vec fastExp2(vec const& x) - { - return detail::functor1::call(fastExp2, x); - } - - // fastLog2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType fastLog2(genType x) - { - return fastLog(x) / 0.69314718055994530941723212145818f; - } - - template - GLM_FUNC_QUALIFIER vec fastLog2(vec const& x) - { - return detail::functor1::call(fastLog2, x); - } -}//namespace glm diff --git a/third_party/glm/gtx/fast_square_root.hpp b/third_party/glm/gtx/fast_square_root.hpp deleted file mode 100755 index 9fb3f2f..0000000 --- a/third_party/glm/gtx/fast_square_root.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref gtx_fast_square_root -/// @file glm/gtx/fast_square_root.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_fast_square_root GLM_GTX_fast_square_root -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of square root based functions. -/// - Sqrt optimisation based on Newton's method, -/// www.gamedev.net/community/forums/topic.asp?topic id=139956 - -#pragma once - -// Dependency: -#include "../common.hpp" -#include "../exponential.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_square_root extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_square_root - /// @{ - - /// Faster than the common sqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastSqrt(genType x); - - /// Faster than the common sqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL vec fastSqrt(vec const& x); - - /// Faster than the common inversesqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastInverseSqrt(genType x); - - /// Faster than the common inversesqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. 
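// Sketch, from the implementations above: fastExp2 and fastLog2 are defined
// through fastExp and fastLog with ln(2), and the scalar fastLog simply
// forwards to std::log, so only the exp side is actually approximated.
#include <glm/gtx/fast_exponential.hpp>

float FastExp2Demo()
{
	return glm::fastExp2(0.5f); // fastExp(0.69314718f * 0.5f), ~sqrt(2)
}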
- template - GLM_FUNC_DECL vec fastInverseSqrt(vec const& x); - - /// Faster than the common length function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastLength(genType x); - - /// Faster than the common length function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL T fastLength(vec const& x); - - /// Faster than the common distance function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastDistance(genType x, genType y); - - /// Faster than the common distance function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL T fastDistance(vec const& x, vec const& y); - - /// Faster than the common normalize function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastNormalize(genType const& x); - - /// @} -}// namespace glm - -#include "fast_square_root.inl" diff --git a/third_party/glm/gtx/fast_square_root.inl b/third_party/glm/gtx/fast_square_root.inl deleted file mode 100755 index 4e6c6de..0000000 --- a/third_party/glm/gtx/fast_square_root.inl +++ /dev/null @@ -1,75 +0,0 @@ -/// @ref gtx_fast_square_root - -namespace glm -{ - // fastSqrt - template - GLM_FUNC_QUALIFIER genType fastSqrt(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastSqrt' only accept floating-point input"); - - return genType(1) / fastInverseSqrt(x); - } - - template - GLM_FUNC_QUALIFIER vec fastSqrt(vec const& x) - { - return detail::functor1::call(fastSqrt, x); - } - - // fastInversesqrt - template - GLM_FUNC_QUALIFIER genType fastInverseSqrt(genType x) - { - return detail::compute_inversesqrt<1, genType, lowp, detail::is_aligned::value>::call(vec<1, genType, lowp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec fastInverseSqrt(vec const& x) - { - return detail::compute_inversesqrt::value>::call(x); - } - - // fastLength - template - GLM_FUNC_QUALIFIER genType fastLength(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastLength' only accept floating-point inputs"); - - return abs(x); - } - - template - GLM_FUNC_QUALIFIER T fastLength(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastLength' only accept floating-point inputs"); - - return fastSqrt(dot(x, x)); - } - - // fastDistance - template - GLM_FUNC_QUALIFIER genType fastDistance(genType x, genType y) - { - return fastLength(y - x); - } - - template - GLM_FUNC_QUALIFIER T fastDistance(vec const& x, vec const& y) - { - return fastLength(y - x); - } - - // fastNormalize - template - GLM_FUNC_QUALIFIER genType fastNormalize(genType x) - { - return x > genType(0) ? genType(1) : -genType(1); - } - - template - GLM_FUNC_QUALIFIER vec fastNormalize(vec const& x) - { - return x * fastInverseSqrt(dot(x, x)); - } -}//namespace glm diff --git a/third_party/glm/gtx/fast_trigonometry.hpp b/third_party/glm/gtx/fast_trigonometry.hpp deleted file mode 100755 index 2650d6e..0000000 --- a/third_party/glm/gtx/fast_trigonometry.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/// @ref gtx_fast_trigonometry -/// @file glm/gtx/fast_trigonometry.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_fast_trigonometry GLM_GTX_fast_trigonometry -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of trigonometric functions. 
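// Sketch, from the fast_square_root implementations above: fastSqrt(x) is
// 1 / fastInverseSqrt(x) and fastNormalize(v) is v * fastInverseSqrt(dot(v, v)),
// so all of these share the same accuracy trade-off.
#include <glm/gtx/fast_square_root.hpp>

float ApproxLength(glm::vec3 const& v)
{
	return glm::fastLength(v); // fastSqrt(dot(v, v)); ~5 for v = (3, 0, 4)
}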
- -#pragma once - -// Dependency: -#include "../gtc/constants.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_trigonometry extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_trigonometry - /// @{ - - /// Wrap an angle to [0 2pi[ - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T wrapAngle(T angle); - - /// Faster than the common sin function but less accurate. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastSin(T angle); - - /// Faster than the common cos function but less accurate. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastCos(T angle); - - /// Faster than the common tan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastTan(T angle); - - /// Faster than the common asin function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAsin(T angle); - - /// Faster than the common acos function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAcos(T angle); - - /// Faster than the common atan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAtan(T y, T x); - - /// Faster than the common atan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. 
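// Sketch of the declarations above, assuming
// <glm/gtx/fast_trigonometry.hpp> is included: wrapAngle folds an angle into
// [0, 2*pi), and fastCos reduces the wrapped angle to a quadrant before
// evaluating a short polynomial (cos_52s in the implementation below). Note
// that the two-argument fastAtan takes its sign from sign(y) * sign(x), so it
// folds opposite quadrants together and is not a drop-in std::atan2.
#include <glm/gtc/constants.hpp>
#include <glm/gtx/fast_trigonometry.hpp>

float FastTrigDemo()
{
	float w = glm::wrapAngle(glm::two_pi<float>() + 1.0f); // ~1.0f
	return glm::fastCos(w);                                // ~std::cos(1.0f), ~0.5403f
}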
- template - GLM_FUNC_DECL T fastAtan(T angle); - - /// @} -}//namespace glm - -#include "fast_trigonometry.inl" diff --git a/third_party/glm/gtx/fast_trigonometry.inl b/third_party/glm/gtx/fast_trigonometry.inl deleted file mode 100755 index 1a710cb..0000000 --- a/third_party/glm/gtx/fast_trigonometry.inl +++ /dev/null @@ -1,142 +0,0 @@ -/// @ref gtx_fast_trigonometry - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER vec taylorCos(vec const& x) - { - return static_cast(1) - - (x * x) * (1.f / 2.f) - + ((x * x) * (x * x)) * (1.f / 24.f) - - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f) - + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f); - } - - template - GLM_FUNC_QUALIFIER T cos_52s(T x) - { - T const xx(x * x); - return (T(0.9999932946) + xx * (T(-0.4999124376) + xx * (T(0.0414877472) + xx * T(-0.0012712095)))); - } - - template - GLM_FUNC_QUALIFIER vec cos_52s(vec const& x) - { - return detail::functor1::call(cos_52s, x); - } -}//namespace detail - - // wrapAngle - template - GLM_FUNC_QUALIFIER T wrapAngle(T angle) - { - return abs(mod(angle, two_pi())); - } - - template - GLM_FUNC_QUALIFIER vec wrapAngle(vec const& x) - { - return detail::functor1::call(wrapAngle, x); - } - - // cos - template - GLM_FUNC_QUALIFIER T fastCos(T x) - { - T const angle(wrapAngle(x)); - - if(angle < half_pi()) - return detail::cos_52s(angle); - if(angle < pi()) - return -detail::cos_52s(pi() - angle); - if(angle < (T(3) * half_pi())) - return -detail::cos_52s(angle - pi()); - - return detail::cos_52s(two_pi() - angle); - } - - template - GLM_FUNC_QUALIFIER vec fastCos(vec const& x) - { - return detail::functor1::call(fastCos, x); - } - - // sin - template - GLM_FUNC_QUALIFIER T fastSin(T x) - { - return fastCos(half_pi() - x); - } - - template - GLM_FUNC_QUALIFIER vec fastSin(vec const& x) - { - return detail::functor1::call(fastSin, x); - } - - // tan - template - GLM_FUNC_QUALIFIER T fastTan(T x) - { - return x + (x * x * x * T(0.3333333333)) + (x * x * x * x * x * T(0.1333333333333)) + (x * x * x * x * x * x * x * T(0.0539682539)); - } - - template - GLM_FUNC_QUALIFIER vec fastTan(vec const& x) - { - return detail::functor1::call(fastTan, x); - } - - // asin - template - GLM_FUNC_QUALIFIER T fastAsin(T x) - { - return x + (x * x * x * T(0.166666667)) + (x * x * x * x * x * T(0.075)) + (x * x * x * x * x * x * x * T(0.0446428571)) + (x * x * x * x * x * x * x * x * x * T(0.0303819444));// + (x * x * x * x * x * x * x * x * x * x * x * T(0.022372159)); - } - - template - GLM_FUNC_QUALIFIER vec fastAsin(vec const& x) - { - return detail::functor1::call(fastAsin, x); - } - - // acos - template - GLM_FUNC_QUALIFIER T fastAcos(T x) - { - return T(1.5707963267948966192313216916398) - fastAsin(x); //(PI / 2) - } - - template - GLM_FUNC_QUALIFIER vec fastAcos(vec const& x) - { - return detail::functor1::call(fastAcos, x); - } - - // atan - template - GLM_FUNC_QUALIFIER T fastAtan(T y, T x) - { - T sgn = sign(y) * sign(x); - return abs(fastAtan(y / x)) * sgn; - } - - template - GLM_FUNC_QUALIFIER vec fastAtan(vec const& y, vec const& x) - { - return detail::functor2::call(fastAtan, y, x); - } - - template - GLM_FUNC_QUALIFIER T fastAtan(T x) - { - return x - (x * x * x * T(0.333333333333)) + (x * x * x * x * x * T(0.2)) - (x * x * x * x * x * x * x * T(0.1428571429)) + (x * x * x * x * x * x * x * x * x * T(0.111111111111)) - (x * x * x * x * x * x * x * x * x * x * x * T(0.0909090909)); - } - - template - GLM_FUNC_QUALIFIER vec fastAtan(vec const& x) - { - return 
detail::functor1::call(fastAtan, x); - } -}//namespace glm diff --git a/third_party/glm/gtx/float_notmalize.inl b/third_party/glm/gtx/float_notmalize.inl deleted file mode 100755 index 8cdbc5a..0000000 --- a/third_party/glm/gtx/float_notmalize.inl +++ /dev/null @@ -1,13 +0,0 @@ -/// @ref gtx_float_normalize - -#include - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec floatNormalize(vec const& v) - { - return vec(v) / static_cast(std::numeric_limits::max()); - } - -}//namespace glm diff --git a/third_party/glm/gtx/functions.hpp b/third_party/glm/gtx/functions.hpp deleted file mode 100755 index 9f4166c..0000000 --- a/third_party/glm/gtx/functions.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/// @ref gtx_functions -/// @file glm/gtx/functions.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_functions GLM_GTX_functions -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// List of useful common functions. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/type_vec2.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_functions extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_functions - /// @{ - - /// 1D gauss function - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL T gauss( - T x, - T ExpectedValue, - T StandardDeviation); - - /// 2D gauss function - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL T gauss( - vec<2, T, Q> const& Coord, - vec<2, T, Q> const& ExpectedValue, - vec<2, T, Q> const& StandardDeviation); - - /// @} -}//namespace glm - -#include "functions.inl" - diff --git a/third_party/glm/gtx/functions.inl b/third_party/glm/gtx/functions.inl deleted file mode 100755 index 29cbb20..0000000 --- a/third_party/glm/gtx/functions.inl +++ /dev/null @@ -1,30 +0,0 @@ -/// @ref gtx_functions - -#include "../exponential.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T gauss - ( - T x, - T ExpectedValue, - T StandardDeviation - ) - { - return exp(-((x - ExpectedValue) * (x - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation)) / (StandardDeviation * sqrt(static_cast(6.28318530717958647692528676655900576))); - } - - template - GLM_FUNC_QUALIFIER T gauss - ( - vec<2, T, Q> const& Coord, - vec<2, T, Q> const& ExpectedValue, - vec<2, T, Q> const& StandardDeviation - ) - { - vec<2, T, Q> const Squared = ((Coord - ExpectedValue) * (Coord - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation); - return exp(-(Squared.x + Squared.y)); - } -}//namespace glm - diff --git a/third_party/glm/gtx/gradient_paint.hpp b/third_party/glm/gtx/gradient_paint.hpp deleted file mode 100755 index 6f85bf4..0000000 --- a/third_party/glm/gtx/gradient_paint.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/// @ref gtx_gradient_paint -/// @file glm/gtx/gradient_paint.hpp -/// -/// @see core (dependence) -/// @see gtx_optimum_pow (dependence) -/// -/// @defgroup gtx_gradient_paint GLM_GTX_gradient_paint -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Functions that return the color of procedural gradient for specific coordinates. 
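// Sketch, assuming the relevant gtx headers are included: the 1D gauss above
// is the normal density exp(-(x - mu)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi)),
// and linearGradient (declared below) returns the normalized projection of a
// position onto the Point0 -> Point1 axis.
#include <glm/gtx/functions.hpp>
#include <glm/gtx/gradient_paint.hpp>

float GaussPeak()
{
	return glm::gauss(0.0f, 0.0f, 1.0f); // ~0.39894f, i.e. 1 / sqrt(2 * pi)
}

float GradientParam()
{
	// 0 at Point0, 1 at Point1: the halfway position yields 0.5.
	return glm::linearGradient(glm::vec2(0.0f), glm::vec2(0.0f, 2.0f), glm::vec2(0.0f, 1.0f));
}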
- -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/optimum_pow.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_gradient_paint extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_gradient_paint - /// @{ - - /// Return a color from a radial gradient. - /// @see - gtx_gradient_paint - template - GLM_FUNC_DECL T radialGradient( - vec<2, T, Q> const& Center, - T const& Radius, - vec<2, T, Q> const& Focal, - vec<2, T, Q> const& Position); - - /// Return a color from a linear gradient. - /// @see - gtx_gradient_paint - template - GLM_FUNC_DECL T linearGradient( - vec<2, T, Q> const& Point0, - vec<2, T, Q> const& Point1, - vec<2, T, Q> const& Position); - - /// @} -}// namespace glm - -#include "gradient_paint.inl" diff --git a/third_party/glm/gtx/gradient_paint.inl b/third_party/glm/gtx/gradient_paint.inl deleted file mode 100755 index 4c495e6..0000000 --- a/third_party/glm/gtx/gradient_paint.inl +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_gradient_paint - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T radialGradient - ( - vec<2, T, Q> const& Center, - T const& Radius, - vec<2, T, Q> const& Focal, - vec<2, T, Q> const& Position - ) - { - vec<2, T, Q> F = Focal - Center; - vec<2, T, Q> D = Position - Focal; - T Radius2 = pow2(Radius); - T Fx2 = pow2(F.x); - T Fy2 = pow2(F.y); - - T Numerator = (D.x * F.x + D.y * F.y) + sqrt(Radius2 * (pow2(D.x) + pow2(D.y)) - pow2(D.x * F.y - D.y * F.x)); - T Denominator = Radius2 - (Fx2 + Fy2); - return Numerator / Denominator; - } - - template - GLM_FUNC_QUALIFIER T linearGradient - ( - vec<2, T, Q> const& Point0, - vec<2, T, Q> const& Point1, - vec<2, T, Q> const& Position - ) - { - vec<2, T, Q> Dist = Point1 - Point0; - return (Dist.x * (Position.x - Point0.x) + Dist.y * (Position.y - Point0.y)) / glm::dot(Dist, Dist); - } -}//namespace glm diff --git a/third_party/glm/gtx/handed_coordinate_space.hpp b/third_party/glm/gtx/handed_coordinate_space.hpp deleted file mode 100755 index 3c85968..0000000 --- a/third_party/glm/gtx/handed_coordinate_space.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/// @ref gtx_handed_coordinate_space -/// @file glm/gtx/handed_coordinate_space.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_handed_coordinate_space GLM_GTX_handed_coordinate_space -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// To know if a set of three basis vectors defines a right or left-handed coordinate system. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_handed_coordinate_space extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_handed_coordinate_space - /// @{ - - //! Return if a trihedron right handed or not. - //! From GLM_GTX_handed_coordinate_space extension. - template - GLM_FUNC_DECL bool rightHanded( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal); - - //! 
Return if a trihedron left handed or not. - //! From GLM_GTX_handed_coordinate_space extension. - template - GLM_FUNC_DECL bool leftHanded( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal); - - /// @} -}// namespace glm - -#include "handed_coordinate_space.inl" diff --git a/third_party/glm/gtx/handed_coordinate_space.inl b/third_party/glm/gtx/handed_coordinate_space.inl deleted file mode 100755 index e43c17b..0000000 --- a/third_party/glm/gtx/handed_coordinate_space.inl +++ /dev/null @@ -1,26 +0,0 @@ -/// @ref gtx_handed_coordinate_space - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool rightHanded - ( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal - ) - { - return dot(cross(normal, tangent), binormal) > T(0); - } - - template - GLM_FUNC_QUALIFIER bool leftHanded - ( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal - ) - { - return dot(cross(normal, tangent), binormal) < T(0); - } -}//namespace glm diff --git a/third_party/glm/gtx/hash.hpp b/third_party/glm/gtx/hash.hpp deleted file mode 100755 index 05dae9f..0000000 --- a/third_party/glm/gtx/hash.hpp +++ /dev/null @@ -1,142 +0,0 @@ -/// @ref gtx_hash -/// @file glm/gtx/hash.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_hash GLM_GTX_hash -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add std::hash support for glm types - -#pragma once - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_hash extension included") -# endif -#endif - -#include - -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../gtc/vec1.hpp" - -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" - -#include "../mat2x2.hpp" -#include "../mat2x3.hpp" -#include "../mat2x4.hpp" - -#include "../mat3x2.hpp" -#include "../mat3x3.hpp" -#include "../mat3x4.hpp" - -#include "../mat4x2.hpp" -#include "../mat4x3.hpp" -#include "../mat4x4.hpp" - -#if !GLM_HAS_CXX11_STL -# error "GLM_GTX_hash requires C++11 standard library support" -#endif - -namespace std -{ - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const; - }; - - template - struct hash> - { - GLM_FUNC_DECL size_t operator()(glm::qua const& q) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::tdualquat const& q) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 
3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const; - }; -} // namespace std - -#include "hash.inl" diff --git a/third_party/glm/gtx/hash.inl b/third_party/glm/gtx/hash.inl deleted file mode 100755 index ff71ca9..0000000 --- a/third_party/glm/gtx/hash.inl +++ /dev/null @@ -1,184 +0,0 @@ -/// @ref gtx_hash -/// -/// @see core (dependence) -/// -/// @defgroup gtx_hash GLM_GTX_hash -/// @ingroup gtx -/// -/// @brief Add std::hash support for glm types -/// -/// need to be included to use the features of this extension. - -namespace glm { -namespace detail -{ - GLM_INLINE void hash_combine(size_t &seed, size_t hash) - { - hash += 0x9e3779b9 + (seed << 6) + (seed >> 2); - seed ^= hash; - } -}} - -namespace std -{ - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<1, T, Q> const& v) const - { - hash hasher; - return hasher(v.x); - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<2, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<3, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - glm::detail::hash_combine(seed, hasher(v.z)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<4, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - glm::detail::hash_combine(seed, hasher(v.z)); - glm::detail::hash_combine(seed, hasher(v.w)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::qua const& q) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(q.x)); - glm::detail::hash_combine(seed, hasher(q.y)); - glm::detail::hash_combine(seed, hasher(q.z)); - glm::detail::hash_combine(seed, hasher(q.w)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::tdualquat const& q) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(q.real)); - glm::detail::hash_combine(seed, hasher(q.dual)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 2, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 3, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 
2, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 3, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 2, T,Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 3, T,Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } -} diff --git a/third_party/glm/gtx/integer.hpp b/third_party/glm/gtx/integer.hpp deleted file mode 100755 index d0b4c61..0000000 --- a/third_party/glm/gtx/integer.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/// @ref gtx_integer -/// @file glm/gtx/integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_integer GLM_GTX_integer -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add support for integer for core functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/integer.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_integer extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_integer - /// @{ - - //! Returns x raised to the y power. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int pow(int x, uint y); - - //! Returns the positive square root of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int sqrt(int x); - - //! Returns the floor log2 of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL unsigned int floor_log2(unsigned int x); - - //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int mod(int x, int y); - - //! Return the factorial value of a number (!12 max, integer only) - //! From GLM_GTX_integer extension. 
- template - GLM_FUNC_DECL genType factorial(genType const& x); - - //! 32bit signed integer. - //! From GLM_GTX_integer extension. - typedef signed int sint; - - //! Returns x raised to the y power. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint pow(uint x, uint y); - - //! Returns the positive square root of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint sqrt(uint x); - - //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint mod(uint x, uint y); - - //! Returns the number of leading zeros. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint nlz(uint x); - - /// @} -}//namespace glm - -#include "integer.inl" diff --git a/third_party/glm/gtx/integer.inl b/third_party/glm/gtx/integer.inl deleted file mode 100755 index 956366b..0000000 --- a/third_party/glm/gtx/integer.inl +++ /dev/null @@ -1,185 +0,0 @@ -/// @ref gtx_integer - -namespace glm -{ - // pow - GLM_FUNC_QUALIFIER int pow(int x, uint y) - { - if(y == 0) - return x >= 0 ? 1 : -1; - - int result = x; - for(uint i = 1; i < y; ++i) - result *= x; - return result; - } - - // sqrt: From Christopher J. Musial, An integer square root, Graphics Gems, 1990, page 387 - GLM_FUNC_QUALIFIER int sqrt(int x) - { - if(x <= 1) return x; - - int NextTrial = x >> 1; - int CurrentAnswer; - - do - { - CurrentAnswer = NextTrial; - NextTrial = (NextTrial + x / NextTrial) >> 1; - } while(NextTrial < CurrentAnswer); - - return CurrentAnswer; - } - -// Henry Gordon Dietz: http://aggregate.org/MAGIC/ -namespace detail -{ - GLM_FUNC_QUALIFIER unsigned int ones32(unsigned int x) - { - /* 32-bit recursive reduction using SWAR... - but first step is mapping 2-bit values - into sum of 2 1-bit values in sneaky way - */ - x -= ((x >> 1) & 0x55555555); - x = (((x >> 2) & 0x33333333) + (x & 0x33333333)); - x = (((x >> 4) + x) & 0x0f0f0f0f); - x += (x >> 8); - x += (x >> 16); - return(x & 0x0000003f); - } -}//namespace detail - - // Henry Gordon Dietz: http://aggregate.org/MAGIC/ -/* - GLM_FUNC_QUALIFIER unsigned int floor_log2(unsigned int x) - { - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - - return _detail::ones32(x) >> 1; - } -*/ - // mod - GLM_FUNC_QUALIFIER int mod(int x, int y) - { - return ((x % y) + y) % y; - } - - // factorial (!12 max, integer only) - template - GLM_FUNC_QUALIFIER genType factorial(genType const& x) - { - genType Temp = x; - genType Result; - for(Result = 1; Temp > 1; --Temp) - Result *= Temp; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> factorial( - vec<2, T, Q> const& x) - { - return vec<2, T, Q>( - factorial(x.x), - factorial(x.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> factorial( - vec<3, T, Q> const& x) - { - return vec<3, T, Q>( - factorial(x.x), - factorial(x.y), - factorial(x.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> factorial( - vec<4, T, Q> const& x) - { - return vec<4, T, Q>( - factorial(x.x), - factorial(x.y), - factorial(x.z), - factorial(x.w)); - } - - GLM_FUNC_QUALIFIER uint pow(uint x, uint y) - { - if (y == 0) - return 1u; - - uint result = x; - for(uint i = 1; i < y; ++i) - result *= x; - return result; - } - - GLM_FUNC_QUALIFIER uint sqrt(uint x) - { - if(x <= 1) return x; - - uint NextTrial = x >> 1; - uint CurrentAnswer; - - do - { - CurrentAnswer = NextTrial; - NextTrial = (NextTrial + x / NextTrial) >> 1; - } while(NextTrial < CurrentAnswer); - - return CurrentAnswer; - } - - 
GLM_FUNC_QUALIFIER uint mod(uint x, uint y)
-	{
-		return x - y * (x / y);
-	}
-
-#if(GLM_COMPILER & (GLM_COMPILER_VC | GLM_COMPILER_GCC))
-
-	GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x)
-	{
-		return 31u - findMSB(x);
-	}
-
-#else
-
-	// Hackers Delight: http://www.hackersdelight.org/HDcode/nlz.c.txt
-	GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x)
-	{
-		int y, m, n;
-
-		y = -int(x >> 16);	// If left half of x is 0,
-		m = (y >> 16) & 16;	// set n = 16.  If left half
-		n = 16 - m;		// is nonzero, set n = 0 and
-		x = x >> m;		// shift x right 16.
-					// Now x is of the form 0000xxxx.
-		y = x - 0x100;		// If positions 8-15 are 0,
-		m = (y >> 16) & 8;	// add 8 to n and shift x left 8.
-		n = n + m;
-		x = x << m;
-
-		y = x - 0x1000;		// If positions 12-15 are 0,
-		m = (y >> 16) & 4;	// add 4 to n and shift x left 4.
-		n = n + m;
-		x = x << m;
-
-		y = x - 0x4000;		// If positions 14-15 are 0,
-		m = (y >> 16) & 2;	// add 2 to n and shift x left 2.
-		n = n + m;
-		x = x << m;
-
-		y = x >> 14;		// Set y = 0, 1, 2, or 3.
-		m = y & ~(y >> 1);	// Set m = 0, 1, 2, or 2 resp.
-		return unsigned(n + 2 - m);
-	}
-
-#endif//(GLM_COMPILER)
-
-}//namespace glm
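The integer sqrt deleted a few hunks above is Heron's (Newton's) iteration s <- (s + x/s) / 2 carried out entirely in integer arithmetic; the loop exits as soon as the trial value stops decreasing, at which point the previous trial is floor(sqrt(x)). A self-contained sketch under those assumptions (the function name isqrt is mine, test values arbitrary):

#include <cstdio>

// Integer Newton iteration, as in the deleted glm::sqrt(int): converges to floor(sqrt(x)).
unsigned isqrt(unsigned x)
{
    if(x <= 1) return x;
    unsigned next = x >> 1, answer;
    do
    {
        answer = next;
        next = (next + x / next) >> 1; // Newton step on f(s) = s*s - x, in integer division
    } while(next < answer);            // stop once the trial stops shrinking
    return answer;
}

int main()
{
    for(unsigned v : {0u, 1u, 15u, 16u, 17u, 1000000u})
        std::printf("isqrt(%u) = %u\n", v, isqrt(v)); // 0 1 3 4 4 1000
}

The monotone stopping rule works because once the iterate overshoots the floor of the root, integer division makes the sequence oscillate rather than descend further, so "next >= answer" is the convergence signal.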
diff --git a/third_party/glm/gtx/intersect.hpp b/third_party/glm/gtx/intersect.hpp
deleted file mode 100755
index 3c78f2b..0000000
--- a/third_party/glm/gtx/intersect.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/// @ref gtx_intersect
-/// @file glm/gtx/intersect.hpp
-///
-/// @see core (dependence)
-/// @see gtx_closest_point (dependence)
-///
-/// @defgroup gtx_intersect GLM_GTX_intersect
-/// @ingroup gtx
-///
-/// Include <glm/gtx/intersect.hpp> to use the features of this extension.
-///
-/// Add intersection functions
-
-#pragma once
-
-// Dependency:
-#include <cfloat>
-#include <limits>
-#include "../glm.hpp"
-#include "../geometric.hpp"
-#include "../gtx/closest_point.hpp"
-#include "../gtx/vector_query.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_closest_point extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_intersect
-	/// @{
-
-	//! Compute the intersection of a ray and a plane.
-	//! Ray direction and plane normal must be unit length.
-	//! From GLM_GTX_intersect extension.
-	template<typename genType>
-	GLM_FUNC_DECL bool intersectRayPlane(
-		genType const& orig, genType const& dir,
-		genType const& planeOrig, genType const& planeNormal,
-		typename genType::value_type & intersectionDistance);
-
-	//! Compute the intersection of a ray and a triangle.
-	/// Based om Tomas Möller implementation http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/
-	//! From GLM_GTX_intersect extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool intersectRayTriangle(
-		vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
-		vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2,
-		vec<2, T, Q>& baryPosition, T& distance);
-
-	//! Compute the intersection of a line and a triangle.
-	//! From GLM_GTX_intersect extension.
-	template<typename genType>
-	GLM_FUNC_DECL bool intersectLineTriangle(
-		genType const& orig, genType const& dir,
-		genType const& vert0, genType const& vert1, genType const& vert2,
-		genType & position);
-
-	//! Compute the intersection distance of a ray and a sphere.
-	//! The ray direction vector is unit length.
-	//! From GLM_GTX_intersect extension.
-	template<typename genType>
-	GLM_FUNC_DECL bool intersectRaySphere(
-		genType const& rayStarting, genType const& rayNormalizedDirection,
-		genType const& sphereCenter, typename genType::value_type const sphereRadiusSquered,
-		typename genType::value_type & intersectionDistance);
-
-	//! Compute the intersection of a ray and a sphere.
-	//! From GLM_GTX_intersect extension.
-	template<typename genType>
-	GLM_FUNC_DECL bool intersectRaySphere(
-		genType const& rayStarting, genType const& rayNormalizedDirection,
-		genType const& sphereCenter, const typename genType::value_type sphereRadius,
-		genType & intersectionPosition, genType & intersectionNormal);
-
-	//! Compute the intersection of a line and a sphere.
-	//! From GLM_GTX_intersect extension
-	template<typename genType>
-	GLM_FUNC_DECL bool intersectLineSphere(
-		genType const& point0, genType const& point1,
-		genType const& sphereCenter, typename genType::value_type sphereRadius,
-		genType & intersectionPosition1, genType & intersectionNormal1,
-		genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType());
-
-	/// @}
-}//namespace glm
-
-#include "intersect.inl"
diff --git a/third_party/glm/gtx/intersect.inl b/third_party/glm/gtx/intersect.inl
deleted file mode 100755
index 54ecb4d..0000000
--- a/third_party/glm/gtx/intersect.inl
+++ /dev/null
@@ -1,200 +0,0 @@
-/// @ref gtx_intersect
-
-namespace glm
-{
-	template<typename genType>
-	GLM_FUNC_QUALIFIER bool intersectRayPlane
-	(
-		genType const& orig, genType const& dir,
-		genType const& planeOrig, genType const& planeNormal,
-		typename genType::value_type & intersectionDistance
-	)
-	{
-		typename genType::value_type d = glm::dot(dir, planeNormal);
-		typename genType::value_type Epsilon = std::numeric_limits<typename genType::value_type>::epsilon();
-
-		if(glm::abs(d) > Epsilon) // if dir and planeNormal are not perpendicular
-		{
-			typename genType::value_type const tmp_intersectionDistance = glm::dot(planeOrig - orig, planeNormal) / d;
-			if (tmp_intersectionDistance > static_cast<typename genType::value_type>(0)) { // allow only intersections
-				intersectionDistance = tmp_intersectionDistance;
-				return true;
-			}
-		}
-
-		return false;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool intersectRayTriangle
-	(
-		vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
-		vec<3, T, Q> const& vert0, vec<3, T, Q> const& vert1, vec<3, T, Q> const& vert2,
-		vec<2, T, Q>& baryPosition, T& distance
-	)
-	{
-		// find vectors for two edges sharing vert0
-		vec<3, T, Q> const edge1 = vert1 - vert0;
-		vec<3, T, Q> const edge2 = vert2 - vert0;
-
-		// begin calculating determinant - also used to calculate U parameter
-		vec<3, T, Q> const p = glm::cross(dir, edge2);
-
-		// if determinant is near zero, ray lies in plane of triangle
-		T const det = glm::dot(edge1, p);
-
-		vec<3, T, Q> Perpendicular(0);
-
-		if(det > std::numeric_limits<T>::epsilon())
-		{
-			// calculate distance from vert0 to ray origin
-			vec<3, T, Q> const dist = orig - vert0;
-
-			// calculate U parameter and test bounds
-			baryPosition.x = glm::dot(dist, p);
-			if(baryPosition.x < static_cast<T>(0) || baryPosition.x > det)
-				return false;
-
-			// prepare to test V parameter
-			Perpendicular = glm::cross(dist, edge1);
-
-			// calculate V parameter and test bounds
-			baryPosition.y = glm::dot(dir, Perpendicular);
-			if((baryPosition.y < static_cast<T>(0)) || ((baryPosition.x + baryPosition.y) > det))
-				return false;
-		}
-		else if(det < -std::numeric_limits<T>::epsilon())
-		{
-			// calculate distance from vert0 to ray origin
-			vec<3, T, Q> const dist = orig - vert0;
-
-			// calculate U parameter and test bounds
-			baryPosition.x = glm::dot(dist, p);
-			if((baryPosition.x >
static_cast(0)) || (baryPosition.x < det)) - return false; - - // prepare to test V parameter - Perpendicular = glm::cross(dist, edge1); - - // calculate V parameter and test bounds - baryPosition.y = glm::dot(dir, Perpendicular); - if((baryPosition.y > static_cast(0)) || (baryPosition.x + baryPosition.y < det)) - return false; - } - else - return false; // ray is parallel to the plane of the triangle - - T inv_det = static_cast(1) / det; - - // calculate distance, ray intersects triangle - distance = glm::dot(edge2, Perpendicular) * inv_det; - baryPosition *= inv_det; - - return true; - } - - template - GLM_FUNC_QUALIFIER bool intersectLineTriangle - ( - genType const& orig, genType const& dir, - genType const& vert0, genType const& vert1, genType const& vert2, - genType & position - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - - genType edge1 = vert1 - vert0; - genType edge2 = vert2 - vert0; - - genType Perpendicular = cross(dir, edge2); - - float det = dot(edge1, Perpendicular); - - if (det > -Epsilon && det < Epsilon) - return false; - typename genType::value_type inv_det = typename genType::value_type(1) / det; - - genType Tengant = orig - vert0; - - position.y = dot(Tengant, Perpendicular) * inv_det; - if (position.y < typename genType::value_type(0) || position.y > typename genType::value_type(1)) - return false; - - genType Cotengant = cross(Tengant, edge1); - - position.z = dot(dir, Cotengant) * inv_det; - if (position.z < typename genType::value_type(0) || position.y + position.z > typename genType::value_type(1)) - return false; - - position.x = dot(edge2, Cotengant) * inv_det; - - return true; - } - - template - GLM_FUNC_QUALIFIER bool intersectRaySphere - ( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, const typename genType::value_type sphereRadiusSquered, - typename genType::value_type & intersectionDistance - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - genType diff = sphereCenter - rayStarting; - typename genType::value_type t0 = dot(diff, rayNormalizedDirection); - typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; - if( dSquared > sphereRadiusSquered ) - { - return false; - } - typename genType::value_type t1 = sqrt( sphereRadiusSquered - dSquared ); - intersectionDistance = t0 > t1 + Epsilon ? 
t0 - t1 : t0 + t1; - return intersectionDistance > Epsilon; - } - - template - GLM_FUNC_QUALIFIER bool intersectRaySphere - ( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, const typename genType::value_type sphereRadius, - genType & intersectionPosition, genType & intersectionNormal - ) - { - typename genType::value_type distance; - if( intersectRaySphere( rayStarting, rayNormalizedDirection, sphereCenter, sphereRadius * sphereRadius, distance ) ) - { - intersectionPosition = rayStarting + rayNormalizedDirection * distance; - intersectionNormal = (intersectionPosition - sphereCenter) / sphereRadius; - return true; - } - return false; - } - - template - GLM_FUNC_QUALIFIER bool intersectLineSphere - ( - genType const& point0, genType const& point1, - genType const& sphereCenter, typename genType::value_type sphereRadius, - genType & intersectionPoint1, genType & intersectionNormal1, - genType & intersectionPoint2, genType & intersectionNormal2 - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - genType dir = normalize(point1 - point0); - genType diff = sphereCenter - point0; - typename genType::value_type t0 = dot(diff, dir); - typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; - if( dSquared > sphereRadius * sphereRadius ) - { - return false; - } - typename genType::value_type t1 = sqrt( sphereRadius * sphereRadius - dSquared ); - if( t0 < t1 + Epsilon ) - t1 = -t1; - intersectionPoint1 = point0 + dir * (t0 - t1); - intersectionNormal1 = (intersectionPoint1 - sphereCenter) / sphereRadius; - intersectionPoint2 = point0 + dir * (t0 + t1); - intersectionNormal2 = (intersectionPoint2 - sphereCenter) / sphereRadius; - return true; - } -}//namespace glm diff --git a/third_party/glm/gtx/io.hpp b/third_party/glm/gtx/io.hpp deleted file mode 100755 index 8d974f0..0000000 --- a/third_party/glm/gtx/io.hpp +++ /dev/null @@ -1,201 +0,0 @@ -/// @ref gtx_io -/// @file glm/gtx/io.hpp -/// @author Jan P Springer (regnirpsj@gmail.com) -/// -/// @see core (dependence) -/// @see gtc_matrix_access (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_io GLM_GTX_io -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// std::[w]ostream support for glm types -/// -/// std::[w]ostream support for glm types + qualifier/width/etc. manipulators -/// based on howard hinnant's std::chrono io proposal -/// [http://home.roadrunner.com/~hinnant/bloomington/chrono_io.html] - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_io is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_io extension included") -# endif -#endif - -#include // std::basic_ostream<> (fwd) -#include // std::locale, std::locale::facet, std::locale::id -#include // std::pair<> - -namespace glm -{ - /// @addtogroup gtx_io - /// @{ - - namespace io - { - enum order_type { column_major, row_major}; - - template - class format_punct : public std::locale::facet - { - typedef CTy char_type; - - public: - - static std::locale::id id; - - bool formatted; - unsigned precision; - unsigned width; - char_type separator; - char_type delim_left; - char_type delim_right; - char_type space; - char_type newline; - order_type order; - - GLM_FUNC_DECL explicit format_punct(size_t a = 0); - GLM_FUNC_DECL explicit format_punct(format_punct const&); - }; - - template > - class basic_state_saver { - - public: - - GLM_FUNC_DECL explicit basic_state_saver(std::basic_ios&); - GLM_FUNC_DECL ~basic_state_saver(); - - private: - - typedef ::std::basic_ios state_type; - typedef typename state_type::char_type char_type; - typedef ::std::ios_base::fmtflags flags_type; - typedef ::std::streamsize streamsize_type; - typedef ::std::locale const locale_type; - - state_type& state_; - flags_type flags_; - streamsize_type precision_; - streamsize_type width_; - char_type fill_; - locale_type locale_; - - GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&); - }; - - typedef basic_state_saver state_saver; - typedef basic_state_saver wstate_saver; - - template > - class basic_format_saver - { - public: - - GLM_FUNC_DECL explicit basic_format_saver(std::basic_ios&); - GLM_FUNC_DECL ~basic_format_saver(); - - private: - - basic_state_saver const bss_; - - GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&); - }; - - typedef basic_format_saver format_saver; - typedef basic_format_saver wformat_saver; - - struct precision - { - unsigned value; - - GLM_FUNC_DECL explicit precision(unsigned); - }; - - struct width - { - unsigned value; - - GLM_FUNC_DECL explicit width(unsigned); - }; - - template - struct delimeter - { - CTy value[3]; - - GLM_FUNC_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ','); - }; - - struct order - { - order_type value; - - GLM_FUNC_DECL explicit order(order_type); - }; - - // functions, inlined (inline) - - template - FTy const& get_facet(std::basic_ios&); - template - std::basic_ios& formatted(std::basic_ios&); - template - std::basic_ios& unformattet(std::basic_ios&); - - template - std::basic_ostream& operator<<(std::basic_ostream&, precision const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, width const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, delimeter const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, order const&); - }//namespace io - - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, qua const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<1, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 2, T, Q> const&); - template - 
GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 4, T, Q> const&); - - template - GLM_FUNC_DECL std::basic_ostream & operator<<(std::basic_ostream &, - std::pair const, mat<4, 4, T, Q> const> const&); - - /// @} -}//namespace glm - -#include "io.inl" diff --git a/third_party/glm/gtx/io.inl b/third_party/glm/gtx/io.inl deleted file mode 100755 index a3a1bb6..0000000 --- a/third_party/glm/gtx/io.inl +++ /dev/null @@ -1,440 +0,0 @@ -/// @ref gtx_io -/// @author Jan P Springer (regnirpsj@gmail.com) - -#include // std::fixed, std::setfill<>, std::setprecision, std::right, std::setw -#include // std::basic_ostream<> -#include "../gtc/matrix_access.hpp" // glm::col, glm::row -#include "../gtx/type_trait.hpp" // glm::type<> - -namespace glm{ -namespace io -{ - template - GLM_FUNC_QUALIFIER format_punct::format_punct(size_t a) - : std::locale::facet(a) - , formatted(true) - , precision(3) - , width(1 + 4 + 1 + precision) - , separator(',') - , delim_left('[') - , delim_right(']') - , space(' ') - , newline('\n') - , order(column_major) - {} - - template - GLM_FUNC_QUALIFIER format_punct::format_punct(format_punct const& a) - : std::locale::facet(0) - , formatted(a.formatted) - , precision(a.precision) - , width(a.width) - , separator(a.separator) - , delim_left(a.delim_left) - , delim_right(a.delim_right) - , space(a.space) - , newline(a.newline) - , order(a.order) - {} - - template std::locale::id format_punct::id; - - template - GLM_FUNC_QUALIFIER basic_state_saver::basic_state_saver(std::basic_ios& a) - : state_(a) - , flags_(a.flags()) - , precision_(a.precision()) - , width_(a.width()) - , fill_(a.fill()) - , locale_(a.getloc()) - {} - - template - GLM_FUNC_QUALIFIER basic_state_saver::~basic_state_saver() - { - state_.imbue(locale_); - state_.fill(fill_); - state_.width(width_); - state_.precision(precision_); - state_.flags(flags_); - } - - template - GLM_FUNC_QUALIFIER basic_format_saver::basic_format_saver(std::basic_ios& a) - : bss_(a) - { - a.imbue(std::locale(a.getloc(), new format_punct(get_facet >(a)))); - } - - template - GLM_FUNC_QUALIFIER - basic_format_saver::~basic_format_saver() - {} - - GLM_FUNC_QUALIFIER precision::precision(unsigned a) - : value(a) - {} - - GLM_FUNC_QUALIFIER width::width(unsigned a) - : value(a) - {} - - template - GLM_FUNC_QUALIFIER delimeter::delimeter(CTy a, CTy b, CTy c) - : value() - { - value[0] = a; - value[1] = b; - value[2] = c; - } - - GLM_FUNC_QUALIFIER order::order(order_type a) - : value(a) - {} - - template - GLM_FUNC_QUALIFIER FTy const& get_facet(std::basic_ios& ios) - { - if(!std::has_facet(ios.getloc())) - ios.imbue(std::locale(ios.getloc(), new FTy)); - - return std::use_facet(ios.getloc()); - } - - template - GLM_FUNC_QUALIFIER std::basic_ios& formatted(std::basic_ios& ios) - { - 
const_cast&>(get_facet >(ios)).formatted = true; - return ios; - } - - template - GLM_FUNC_QUALIFIER std::basic_ios& unformatted(std::basic_ios& ios) - { - const_cast&>(get_facet >(ios)).formatted = false; - return ios; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, precision const& a) - { - const_cast&>(get_facet >(os)).precision = a.value; - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, width const& a) - { - const_cast&>(get_facet >(os)).width = a.value; - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, delimeter const& a) - { - format_punct & fmt(const_cast&>(get_facet >(os))); - - fmt.delim_left = a.value[0]; - fmt.delim_right = a.value[1]; - fmt.separator = a.value[2]; - - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, order const& a) - { - const_cast&>(get_facet >(os)).order = a.value; - return os; - } -} // namespace io - -namespace detail -{ - template - GLM_FUNC_QUALIFIER std::basic_ostream& - print_vector_on(std::basic_ostream& os, V const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - - length_t const& components(type::components); - - if(fmt.formatted) - { - io::basic_state_saver const bss(os); - - os << std::fixed << std::right << std::setprecision(fmt.precision) << std::setfill(fmt.space) << fmt.delim_left; - - for(length_t i(0); i < components; ++i) - { - os << std::setw(fmt.width) << a[i]; - if(components-1 != i) - os << fmt.separator; - } - - os << fmt.delim_right; - } - else - { - for(length_t i(0); i < components; ++i) - { - os << a[i]; - - if(components-1 != i) - os << fmt.space; - } - } - } - - return os; - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, qua const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<1, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<2, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<3, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<4, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - -namespace detail -{ - template class M, length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_on(std::basic_ostream& os, M const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - - length_t const& cols(type >::cols); - length_t const& rows(type >::rows); - - if(fmt.formatted) - { - os << fmt.newline << fmt.delim_left; - - switch(fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < rows; ++i) - { - if (0 != i) - os << fmt.space; - - os << row(a, i); - - if(rows-1 != i) - os << fmt.newline; - } - } - break; - - case io::row_major: - { - for(length_t i(0); i < cols; ++i) - { - if(0 != i) - os << fmt.space; - - os << column(a, i); - - if(cols-1 != i) - os << fmt.newline; - } - } - break; - } - - os << 
fmt.delim_right; - } - else - { - switch (fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < cols; ++i) - { - os << column(a, i); - - if(cols - 1 != i) - os << fmt.space; - } - } - break; - - case io::row_major: - { - for (length_t i(0); i < rows; ++i) - { - os << row(a, i); - - if (rows-1 != i) - os << fmt.space; - } - } - break; - } - } - } - - return os; - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<3, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - -namespace detail -{ - template class M, length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_pair_on(std::basic_ostream& os, std::pair const, M const> const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - M const& ml(a.first); - M const& mr(a.second); - length_t const& cols(type >::cols); - length_t const& rows(type >::rows); - - if(fmt.formatted) - { - os << fmt.newline << fmt.delim_left; - - switch(fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < rows; ++i) - { - if(0 != i) - os << fmt.space; - - os << row(ml, i) << ((rows-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << row(mr, i); - - if(rows-1 != i) - os << fmt.newline; - } - } - break; - case io::row_major: - { - for(length_t i(0); i < cols; ++i) - { - if(0 != i) - os << fmt.space; - - os << column(ml, i) << ((cols-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? 
fmt.space : fmt.delim_left) << column(mr, i);
-
-							if(cols-1 != i)
-								os << fmt.newline;
-						}
-					}
-					break;
-				}
-
-				os << fmt.delim_right;
-			}
-			else
-			{
-				os << ml << fmt.space << mr;
-			}
-		}
-
-		return os;
-	}
-}//namespace detail
-
-	template<typename CTy, typename CTr, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER std::basic_ostream<CTy, CTr>& operator<<(
-		std::basic_ostream<CTy, CTr> & os,
-		std::pair<mat<4, 4, T, Q> const,
-		mat<4, 4, T, Q> const> const& a)
-	{
-		return detail::print_matrix_pair_on<CTy, CTr, mat, 4, 4, T, Q>(os, a);
-	}
-}//namespace glm
diff --git a/third_party/glm/gtx/log_base.hpp b/third_party/glm/gtx/log_base.hpp
deleted file mode 100755
index ba28c9d..0000000
--- a/third_party/glm/gtx/log_base.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/// @ref gtx_log_base
-/// @file glm/gtx/log_base.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtx_log_base GLM_GTX_log_base
-/// @ingroup gtx
-///
-/// Include <glm/gtx/log_base.hpp> to use the features of this extension.
-///
-/// Logarithm for any base. base can be a vector or a scalar.
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_log_base extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_log_base
-	/// @{
-
-	/// Logarithm for any base.
-	/// From GLM_GTX_log_base.
-	template<typename genType>
-	GLM_FUNC_DECL genType log(
-		genType const& x,
-		genType const& base);
-
-	/// Logarithm for any base.
-	/// From GLM_GTX_log_base.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> sign(
-		vec<L, T, Q> const& x,
-		vec<L, T, Q> const& base);
-
-	/// @}
-}//namespace glm
-
-#include "log_base.inl"
diff --git a/third_party/glm/gtx/log_base.inl b/third_party/glm/gtx/log_base.inl
deleted file mode 100755
index 4bbb8e8..0000000
--- a/third_party/glm/gtx/log_base.inl
+++ /dev/null
@@ -1,16 +0,0 @@
-/// @ref gtx_log_base
-
-namespace glm
-{
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType log(genType const& x, genType const& base)
-	{
-		return glm::log(x) / glm::log(base);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> log(vec<L, T, Q> const& x, vec<L, T, Q> const& base)
-	{
-		return glm::log(x) / glm::log(base);
-	}
-}//namespace glm
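The two log_base files above are a one-line application of the change-of-base identity log_b(x) = ln(x) / ln(b); for example log_2(8) = ln 8 / ln 2 = 3. (Note the header declares the vector overload as sign while the .inl defines log; that mismatch exists in the upstream GLM source and is preserved verbatim above.) A standalone check of the identity (the function name is mine):

#include <cmath>
#include <cstdio>

// Change-of-base identity used by the deleted log_base.inl: log_base(x) = ln(x) / ln(base).
double log_base(double x, double base) { return std::log(x) / std::log(base); }

int main()
{
    std::printf("%f\n", log_base(8.0, 2.0));   // 3, since 2^3 = 8
    std::printf("%f\n", log_base(81.0, 3.0));  // 4, since 3^4 = 81
}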
diff --git a/third_party/glm/gtx/matrix_cross_product.hpp b/third_party/glm/gtx/matrix_cross_product.hpp
deleted file mode 100755
index 1e585f9..0000000
--- a/third_party/glm/gtx/matrix_cross_product.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/// @ref gtx_matrix_cross_product
-/// @file glm/gtx/matrix_cross_product.hpp
-///
-/// @see core (dependence)
-/// @see gtx_extented_min_max (dependence)
-///
-/// @defgroup gtx_matrix_cross_product GLM_GTX_matrix_cross_product
-/// @ingroup gtx
-///
-/// Include <glm/gtx/matrix_cross_product.hpp> to use the features of this extension.
-///
-/// Build cross product matrices
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_matrix_cross_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_matrix_cross_product extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_matrix_cross_product
-	/// @{
-
-	//! Build a cross product matrix.
-	//! From GLM_GTX_matrix_cross_product extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<3, 3, T, Q> matrixCross3(
-		vec<3, T, Q> const& x);
-
-	//! Build a cross product matrix.
-	//! From GLM_GTX_matrix_cross_product extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<4, 4, T, Q> matrixCross4(
-		vec<3, T, Q> const& x);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_cross_product.inl"
diff --git a/third_party/glm/gtx/matrix_cross_product.inl b/third_party/glm/gtx/matrix_cross_product.inl
deleted file mode 100755
index 3a15397..0000000
--- a/third_party/glm/gtx/matrix_cross_product.inl
+++ /dev/null
@@ -1,37 +0,0 @@
-/// @ref gtx_matrix_cross_product
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> matrixCross3
-	(
-		vec<3, T, Q> const& x
-	)
-	{
-		mat<3, 3, T, Q> Result(T(0));
-		Result[0][1] = x.z;
-		Result[1][0] = -x.z;
-		Result[0][2] = -x.y;
-		Result[2][0] = x.y;
-		Result[1][2] = x.x;
-		Result[2][1] = -x.x;
-		return Result;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> matrixCross4
-	(
-		vec<3, T, Q> const& x
-	)
-	{
-		mat<4, 4, T, Q> Result(T(0));
-		Result[0][1] = x.z;
-		Result[1][0] = -x.z;
-		Result[0][2] = -x.y;
-		Result[2][0] = x.y;
-		Result[1][2] = x.x;
-		Result[2][1] = -x.x;
-		return Result;
-	}
-
-}//namespace glm
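matrixCross3 above builds the skew-symmetric matrix [a]x for which [a]x * b = cross(a, b), so a cross product can be applied as a matrix multiply and composed with other linear maps. A standalone numeric check of that identity in GLM's column-major layout (all names are mine, the vectors arbitrary; with a = (1,2,3) and b = (4,5,6) both sides come out (-3, 6, -3)):

#include <cstdio>

// Skew-symmetric "cross product matrix" [a]x, filled exactly like the deleted matrixCross3.
// Layout is m[col][row], matching GLM's m[column][row] indexing.
int main()
{
    float a[3] = {1.f, 2.f, 3.f}, b[3] = {4.f, 5.f, 6.f};

    float m[3][3] = {};
    m[0][1] =  a[2]; m[1][0] = -a[2];
    m[0][2] = -a[1]; m[2][0] =  a[1];
    m[1][2] =  a[0]; m[2][1] = -a[0];

    float mb[3]; // (m * b)[row] = sum over columns of m[col][row] * b[col]
    for(int r = 0; r < 3; ++r)
        mb[r] = m[0][r] * b[0] + m[1][r] * b[1] + m[2][r] * b[2];

    float c[3] = {a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]};
    std::printf("m*b = (%g, %g, %g), cross = (%g, %g, %g)\n", mb[0], mb[1], mb[2], c[0], c[1], c[2]);
}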
diff --git a/third_party/glm/gtx/matrix_decompose.hpp b/third_party/glm/gtx/matrix_decompose.hpp
deleted file mode 100755
index acd7a7f..0000000
--- a/third_party/glm/gtx/matrix_decompose.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/// @ref gtx_matrix_decompose
-/// @file glm/gtx/matrix_decompose.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtx_matrix_decompose GLM_GTX_matrix_decompose
-/// @ingroup gtx
-///
-/// Include <glm/gtx/matrix_decompose.hpp> to use the features of this extension.
-///
-/// Decomposes a model matrix to translations, rotation and scale components
-
-#pragma once
-
-// Dependencies
-#include "../mat4x4.hpp"
-#include "../vec3.hpp"
-#include "../vec4.hpp"
-#include "../geometric.hpp"
-#include "../gtc/quaternion.hpp"
-#include "../gtc/matrix_transform.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_matrix_decompose is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_matrix_decompose extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_matrix_decompose
-	/// @{
-
-	/// Decomposes a model matrix to translations, rotation and scale components
-	/// @see gtx_matrix_decompose
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool decompose(
-		mat<4, 4, T, Q> const& modelMatrix,
-		vec<3, T, Q> & scale, qua<T, Q> & orientation, vec<3, T, Q> & translation, vec<3, T, Q> & skew, vec<4, T, Q> & perspective);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_decompose.inl"
diff --git a/third_party/glm/gtx/matrix_decompose.inl b/third_party/glm/gtx/matrix_decompose.inl
deleted file mode 100755
index 694f5ec..0000000
--- a/third_party/glm/gtx/matrix_decompose.inl
+++ /dev/null
@@ -1,186 +0,0 @@
-/// @ref gtx_matrix_decompose
-
-#include "../gtc/constants.hpp"
-#include "../gtc/epsilon.hpp"
-
-namespace glm{
-namespace detail
-{
-	/// Make a linear combination of two vectors and return the result.
-	// result = (a * ascl) + (b * bscl)
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> combine(
-		vec<3, T, Q> const& a,
-		vec<3, T, Q> const& b,
-		T ascl, T bscl)
-	{
-		return (a * ascl) + (b * bscl);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> scale(vec<3, T, Q> const& v, T desiredLength)
-	{
-		return v * desiredLength / length(v);
-	}
-}//namespace detail
-
-	// Matrix decompose
-	// http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp
-	// Decomposes the mode matrix to translations,rotation scale components
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool decompose(mat<4, 4, T, Q> const& ModelMatrix, vec<3, T, Q> & Scale, qua<T, Q> & Orientation, vec<3, T, Q> & Translation, vec<3, T, Q> & Skew, vec<4, T, Q> & Perspective)
-	{
-		mat<4, 4, T, Q> LocalMatrix(ModelMatrix);
-
-		// Normalize the matrix.
-		if(epsilonEqual(LocalMatrix[3][3], static_cast<T>(0), epsilon<T>()))
-			return false;
-
-		for(length_t i = 0; i < 4; ++i)
-		for(length_t j = 0; j < 4; ++j)
-			LocalMatrix[i][j] /= LocalMatrix[3][3];
-
-		// perspectiveMatrix is used to solve for perspective, but it also provides
-		// an easy way to test for singularity of the upper 3x3 component.
-		mat<4, 4, T, Q> PerspectiveMatrix(LocalMatrix);
-
-		for(length_t i = 0; i < 3; i++)
-			PerspectiveMatrix[i][3] = static_cast<T>(0);
-		PerspectiveMatrix[3][3] = static_cast<T>(1);
-
-		/// TODO: Fixme!
-		if(epsilonEqual(determinant(PerspectiveMatrix), static_cast<T>(0), epsilon<T>()))
-			return false;
-
-		// First, isolate perspective.  This is the messiest.
-		if(
-			epsilonNotEqual(LocalMatrix[0][3], static_cast<T>(0), epsilon<T>()) ||
-			epsilonNotEqual(LocalMatrix[1][3], static_cast<T>(0), epsilon<T>()) ||
-			epsilonNotEqual(LocalMatrix[2][3], static_cast<T>(0), epsilon<T>()))
-		{
-			// rightHandSide is the right hand side of the equation.
-			vec<4, T, Q> RightHandSide;
-			RightHandSide[0] = LocalMatrix[0][3];
-			RightHandSide[1] = LocalMatrix[1][3];
-			RightHandSide[2] = LocalMatrix[2][3];
-			RightHandSide[3] = LocalMatrix[3][3];
-
-			// Solve the equation by inverting PerspectiveMatrix and multiplying
-			// rightHandSide by the inverse.  (This is the easiest way, not
-			// necessarily the best.)
-			mat<4, 4, T, Q> InversePerspectiveMatrix = glm::inverse(PerspectiveMatrix);//   inverse(PerspectiveMatrix, inversePerspectiveMatrix);
-			mat<4, 4, T, Q> TransposedInversePerspectiveMatrix = glm::transpose(InversePerspectiveMatrix);//   transposeMatrix4(inversePerspectiveMatrix, transposedInversePerspectiveMatrix);
-
-			Perspective = TransposedInversePerspectiveMatrix * RightHandSide;
-			//  v4MulPointByMatrix(rightHandSide, transposedInversePerspectiveMatrix, perspectivePoint);
-
-			// Clear the perspective partition
-			LocalMatrix[0][3] = LocalMatrix[1][3] = LocalMatrix[2][3] = static_cast<T>(0);
-			LocalMatrix[3][3] = static_cast<T>(1);
-		}
-		else
-		{
-			// No perspective.
-			Perspective = vec<4, T, Q>(0, 0, 0, 1);
-		}
-
-		// Next take care of translation (easy).
-		Translation = vec<3, T, Q>(LocalMatrix[3]);
-		LocalMatrix[3] = vec<4, T, Q>(0, 0, 0, LocalMatrix[3].w);
-
-		vec<3, T, Q> Row[3], Pdum3;
-
-		// Now get scale and shear.
-		for(length_t i = 0; i < 3; ++i)
-		for(length_t j = 0; j < 3; ++j)
-			Row[i][j] = LocalMatrix[i][j];
-
-		// Compute X scale factor and normalize first row.
-		Scale.x = length(Row[0]);// v3Length(Row[0]);
-
-		Row[0] = detail::scale(Row[0], static_cast<T>(1));
-
-		// Compute XY shear factor and make 2nd row orthogonal to 1st.
- Skew.z = dot(Row[0], Row[1]); - Row[1] = detail::combine(Row[1], Row[0], static_cast(1), -Skew.z); - - // Now, compute Y scale and normalize 2nd row. - Scale.y = length(Row[1]); - Row[1] = detail::scale(Row[1], static_cast(1)); - Skew.z /= Scale.y; - - // Compute XZ and YZ shears, orthogonalize 3rd row. - Skew.y = glm::dot(Row[0], Row[2]); - Row[2] = detail::combine(Row[2], Row[0], static_cast(1), -Skew.y); - Skew.x = glm::dot(Row[1], Row[2]); - Row[2] = detail::combine(Row[2], Row[1], static_cast(1), -Skew.x); - - // Next, get Z scale and normalize 3rd row. - Scale.z = length(Row[2]); - Row[2] = detail::scale(Row[2], static_cast(1)); - Skew.y /= Scale.z; - Skew.x /= Scale.z; - - // At this point, the matrix (in rows[]) is orthonormal. - // Check for a coordinate system flip. If the determinant - // is -1, then negate the matrix and the scaling factors. - Pdum3 = cross(Row[1], Row[2]); // v3Cross(row[1], row[2], Pdum3); - if(dot(Row[0], Pdum3) < 0) - { - for(length_t i = 0; i < 3; i++) - { - Scale[i] *= static_cast(-1); - Row[i] *= static_cast(-1); - } - } - - // Now, get the rotations out, as described in the gem. - - // FIXME - Add the ability to return either quaternions (which are - // easier to recompose with) or Euler angles (rx, ry, rz), which - // are easier for authors to deal with. The latter will only be useful - // when we fix https://bugs.webkit.org/show_bug.cgi?id=23799, so I - // will leave the Euler angle code here for now. - - // ret.rotateY = asin(-Row[0][2]); - // if (cos(ret.rotateY) != 0) { - // ret.rotateX = atan2(Row[1][2], Row[2][2]); - // ret.rotateZ = atan2(Row[0][1], Row[0][0]); - // } else { - // ret.rotateX = atan2(-Row[2][0], Row[1][1]); - // ret.rotateZ = 0; - // } - - int i, j, k = 0; - T root, trace = Row[0].x + Row[1].y + Row[2].z; - if(trace > static_cast(0)) - { - root = sqrt(trace + static_cast(1.0)); - Orientation.w = static_cast(0.5) * root; - root = static_cast(0.5) / root; - Orientation.x = root * (Row[1].z - Row[2].y); - Orientation.y = root * (Row[2].x - Row[0].z); - Orientation.z = root * (Row[0].y - Row[1].x); - } // End if > 0 - else - { - static int Next[3] = {1, 2, 0}; - i = 0; - if(Row[1].y > Row[0].x) i = 1; - if(Row[2].z > Row[i][i]) i = 2; - j = Next[i]; - k = Next[j]; - - root = sqrt(Row[i][i] - Row[j][j] - Row[k][k] + static_cast(1.0)); - - Orientation[i] = static_cast(0.5) * root; - root = static_cast(0.5) / root; - Orientation[j] = root * (Row[i][j] + Row[j][i]); - Orientation[k] = root * (Row[i][k] + Row[k][i]); - Orientation.w = root * (Row[j][k] - Row[k][j]); - } // End if <= 0 - - return true; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_factorisation.hpp b/third_party/glm/gtx/matrix_factorisation.hpp deleted file mode 100755 index 5a975d6..0000000 --- a/third_party/glm/gtx/matrix_factorisation.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/// @ref gtx_matrix_factorisation -/// @file glm/gtx/matrix_factorisation.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_factorisation GLM_GTX_matrix_factorisation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Functions to factor matrices in various forms - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_factorisation is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_matrix_factorisation extension included")
-#	endif
-#endif
-
-/*
-Suggestions:
- - Move helper functions flipud and fliplr to another file: They may be helpful in more general circumstances.
- - Implement other types of matrix factorisation, such as: QL and LQ, L(D)U, eigendecompositions, etc...
-*/
-
-namespace glm
-{
-	/// @addtogroup gtx_matrix_factorisation
-	/// @{
-
-	/// Flips the matrix rows up and down.
-	///
-	/// From GLM_GTX_matrix_factorisation extension.
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in);
-
-	/// Flips the matrix columns right and left.
-	///
-	/// From GLM_GTX_matrix_factorisation extension.
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in);
-
-	/// Performs QR factorisation of a matrix.
-	/// Returns 2 matrices, q and r, such that the columns of q are orthonormal and span the same subspace as those of the input matrix, r is an upper triangular matrix, and q*r=in.
-	/// Given an n-by-m input matrix, q has dimensions min(n,m)-by-m, and r has dimensions n-by-min(n,m).
-	///
-	/// From GLM_GTX_matrix_factorisation extension.
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r);
-
-	/// Performs RQ factorisation of a matrix.
-	/// Returns 2 matrices, r and q, such that r is an upper triangular matrix, the rows of q are orthonormal and span the same subspace as those of the input matrix, and r*q=in.
-	/// Note that in the context of RQ factorisation, the diagonal is seen as starting in the lower-right corner of the matrix, instead of the usual upper-left.
-	/// Given an n-by-m input matrix, r has dimensions min(n,m)-by-m, and q has dimensions n-by-min(n,m).
-	///
-	/// From GLM_GTX_matrix_factorisation extension.
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL void rq_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& r, mat<C, (C < R ? C : R), T, Q>& q);
-
-	/// @}
-}
-
-#include "matrix_factorisation.inl"
diff --git a/third_party/glm/gtx/matrix_factorisation.inl b/third_party/glm/gtx/matrix_factorisation.inl
deleted file mode 100755
index c479b8a..0000000
--- a/third_party/glm/gtx/matrix_factorisation.inl
+++ /dev/null
@@ -1,84 +0,0 @@
-/// @ref gtx_matrix_factorisation
-
-namespace glm
-{
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<C, R, T, Q> flipud(mat<C, R, T, Q> const& in)
-	{
-		mat<R, C, T, Q> tin = transpose(in);
-		tin = fliplr(tin);
-		mat<C, R, T, Q> out = transpose(tin);
-
-		return out;
-	}
-
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<C, R, T, Q> fliplr(mat<C, R, T, Q> const& in)
-	{
-		mat<C, R, T, Q> out;
-		for (length_t i = 0; i < C; i++)
-		{
-			out[i] = in[(C - i) - 1];
-		}
-
-		return out;
-	}
-
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER void qr_decompose(mat<C, R, T, Q> const& in, mat<(C < R ? C : R), R, T, Q>& q, mat<C, (C < R ? C : R), T, Q>& r)
-	{
-		// Uses modified Gram-Schmidt method
-		// Source: https://en.wikipedia.org/wiki/Gram–Schmidt_process
-		// And https://en.wikipedia.org/wiki/QR_decomposition
-
-		// For all the linearly independent columns of the input...
-		// (there can be no more linearly independent columns than there are rows.)
-		for (length_t i = 0; i < (C < R ? C : R); i++)
-		{
-			// Copy in Q the input's i-th column.
-			q[i] = in[i];
-
-			// j = [0,i[
-			// Make that column orthogonal to all the previous ones by subtracting from it its non-orthogonal projection onto all the previous columns.
-			// Also: fill the zero elements of R.
-			for (length_t j = 0; j < i; j++)
-			{
-				q[i] -= dot(q[i], q[j])*q[j];
-				r[j][i] = 0;
-			}
-
-			// Now, Q's i-th column is orthogonal to all the previous columns. Normalize it.
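For orientation while reading this loop, a round-trip check for the square case may help: for an invertible square input, q * r should reproduce it up to float rounding. A sketch only, assuming GLM is on the include path; the helper name is made up:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/matrix_factorisation.hpp>
    #include <cmath>

    // Verify q * r == in element-wise, within a small tolerance.
    bool qrRoundTrips(const glm::mat3& in)
    {
        glm::mat3 q, r;
        glm::qr_decompose(in, q, r);   // q: orthonormal columns, r: upper triangular
        glm::mat3 diff = q * r - in;
        for (int col = 0; col < 3; ++col)
            for (int row = 0; row < 3; ++row)
                if (std::fabs(diff[col][row]) > 1e-4f)
                    return false;
        return true;
    }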
- q[i] = normalize(q[i]); - - //j = [i,C[ - //Finally, compute the corresponding coefficients of R by computing the projection of the resulting column on the other columns of the input. - for (length_t j = i; j < C; j++) - { - r[j][i] = dot(in[j], q[i]); - } - } - } - - template - GLM_FUNC_QUALIFIER void rq_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& r, mat& q) - { - // From https://en.wikipedia.org/wiki/QR_decomposition: - // The RQ decomposition transforms a matrix A into the product of an upper triangular matrix R (also known as right-triangular) and an orthogonal matrix Q. The only difference from QR decomposition is the order of these matrices. - // QR decomposition is Gram–Schmidt orthogonalization of columns of A, started from the first column. - // RQ decomposition is Gram–Schmidt orthogonalization of rows of A, started from the last row. - - mat tin = transpose(in); - tin = fliplr(tin); - - mat tr; - mat<(C < R ? C : R), C, T, Q> tq; - qr_decompose(tin, tq, tr); - - tr = fliplr(tr); - r = transpose(tr); - r = fliplr(r); - - tq = fliplr(tq); - q = transpose(tq); - } -} //namespace glm diff --git a/third_party/glm/gtx/matrix_interpolation.hpp b/third_party/glm/gtx/matrix_interpolation.hpp deleted file mode 100755 index 7d5ad4c..0000000 --- a/third_party/glm/gtx/matrix_interpolation.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_matrix_interpolation -/// @file glm/gtx/matrix_interpolation.hpp -/// @author Ghenadii Ursachi (the.asteroth@gmail.com) -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_interpolation GLM_GTX_matrix_interpolation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Allows to directly interpolate two matrices. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_interpolation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_interpolation extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_interpolation - /// @{ - - /// Get the axis and angle of the rotation from a matrix. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL void axisAngle( - mat<4, 4, T, Q> const& Mat, vec<3, T, Q> & Axis, T & Angle); - - /// Build a matrix from axis and angle. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> axisAngleMatrix( - vec<3, T, Q> const& Axis, T const Angle); - - /// Extracts the rotation part of a matrix. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> extractMatrixRotation( - mat<4, 4, T, Q> const& Mat); - - /// Build a interpolation of 4 * 4 matrixes. - /// From GLM_GTX_matrix_interpolation extension. - /// Warning! works only with rotation and/or translation matrixes, scale will generate unexpected results. 
- template - GLM_FUNC_DECL mat<4, 4, T, Q> interpolate( - mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const Delta); - - /// @} -}//namespace glm - -#include "matrix_interpolation.inl" diff --git a/third_party/glm/gtx/matrix_interpolation.inl b/third_party/glm/gtx/matrix_interpolation.inl deleted file mode 100755 index de40b7d..0000000 --- a/third_party/glm/gtx/matrix_interpolation.inl +++ /dev/null @@ -1,129 +0,0 @@ -/// @ref gtx_matrix_interpolation - -#include "../gtc/constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER void axisAngle(mat<4, 4, T, Q> const& m, vec<3, T, Q> & axis, T& angle) - { - T epsilon = static_cast(0.01); - T epsilon2 = static_cast(0.1); - - if((abs(m[1][0] - m[0][1]) < epsilon) && (abs(m[2][0] - m[0][2]) < epsilon) && (abs(m[2][1] - m[1][2]) < epsilon)) - { - if ((abs(m[1][0] + m[0][1]) < epsilon2) && (abs(m[2][0] + m[0][2]) < epsilon2) && (abs(m[2][1] + m[1][2]) < epsilon2) && (abs(m[0][0] + m[1][1] + m[2][2] - static_cast(3.0)) < epsilon2)) - { - angle = static_cast(0.0); - axis.x = static_cast(1.0); - axis.y = static_cast(0.0); - axis.z = static_cast(0.0); - return; - } - angle = static_cast(3.1415926535897932384626433832795); - T xx = (m[0][0] + static_cast(1.0)) * static_cast(0.5); - T yy = (m[1][1] + static_cast(1.0)) * static_cast(0.5); - T zz = (m[2][2] + static_cast(1.0)) * static_cast(0.5); - T xy = (m[1][0] + m[0][1]) * static_cast(0.25); - T xz = (m[2][0] + m[0][2]) * static_cast(0.25); - T yz = (m[2][1] + m[1][2]) * static_cast(0.25); - if((xx > yy) && (xx > zz)) - { - if(xx < epsilon) - { - axis.x = static_cast(0.0); - axis.y = static_cast(0.7071); - axis.z = static_cast(0.7071); - } - else - { - axis.x = sqrt(xx); - axis.y = xy / axis.x; - axis.z = xz / axis.x; - } - } - else if (yy > zz) - { - if(yy < epsilon) - { - axis.x = static_cast(0.7071); - axis.y = static_cast(0.0); - axis.z = static_cast(0.7071); - } - else - { - axis.y = sqrt(yy); - axis.x = xy / axis.y; - axis.z = yz / axis.y; - } - } - else - { - if (zz < epsilon) - { - axis.x = static_cast(0.7071); - axis.y = static_cast(0.7071); - axis.z = static_cast(0.0); - } - else - { - axis.z = sqrt(zz); - axis.x = xz / axis.z; - axis.y = yz / axis.z; - } - } - return; - } - T s = sqrt((m[2][1] - m[1][2]) * (m[2][1] - m[1][2]) + (m[2][0] - m[0][2]) * (m[2][0] - m[0][2]) + (m[1][0] - m[0][1]) * (m[1][0] - m[0][1])); - if (glm::abs(s) < T(0.001)) - s = static_cast(1); - T const angleCos = (m[0][0] + m[1][1] + m[2][2] - static_cast(1)) * static_cast(0.5); - if(angleCos - static_cast(1) < epsilon) - angle = pi() * static_cast(0.25); - else - angle = acos(angleCos); - axis.x = (m[1][2] - m[2][1]) / s; - axis.y = (m[2][0] - m[0][2]) / s; - axis.z = (m[0][1] - m[1][0]) / s; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> axisAngleMatrix(vec<3, T, Q> const& axis, T const angle) - { - T c = cos(angle); - T s = sin(angle); - T t = static_cast(1) - c; - vec<3, T, Q> n = normalize(axis); - - return mat<4, 4, T, Q>( - t * n.x * n.x + c, t * n.x * n.y + n.z * s, t * n.x * n.z - n.y * s, static_cast(0.0), - t * n.x * n.y - n.z * s, t * n.y * n.y + c, t * n.y * n.z + n.x * s, static_cast(0.0), - t * n.x * n.z + n.y * s, t * n.y * n.z - n.x * s, t * n.z * n.z + c, static_cast(0.0), - static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> extractMatrixRotation(mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0][0], m[0][1], m[0][2], static_cast(0.0), - m[1][0], m[1][1], m[1][2], static_cast(0.0), - 
m[2][0], m[2][1], m[2][2], static_cast(0.0), - static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> interpolate(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const delta) - { - mat<4, 4, T, Q> m1rot = extractMatrixRotation(m1); - mat<4, 4, T, Q> dltRotation = m2 * transpose(m1rot); - vec<3, T, Q> dltAxis; - T dltAngle; - axisAngle(dltRotation, dltAxis, dltAngle); - mat<4, 4, T, Q> out = axisAngleMatrix(dltAxis, dltAngle * delta) * m1rot; - out[3][0] = m1[3][0] + delta * (m2[3][0] - m1[3][0]); - out[3][1] = m1[3][1] + delta * (m2[3][1] - m1[3][1]); - out[3][2] = m1[3][2] + delta * (m2[3][2] - m1[3][2]); - return out; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_major_storage.hpp b/third_party/glm/gtx/matrix_major_storage.hpp deleted file mode 100755 index 8c6bc22..0000000 --- a/third_party/glm/gtx/matrix_major_storage.hpp +++ /dev/null @@ -1,119 +0,0 @@ -/// @ref gtx_matrix_major_storage -/// @file glm/gtx/matrix_major_storage.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_matrix_major_storage GLM_GTX_matrix_major_storage -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build matrices with specific matrix order, row or column - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_major_storage is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_major_storage extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_major_storage - /// @{ - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( - mat<2, 2, T, Q> const& m); - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( - mat<3, 3, T, Q> const& m); - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( - vec<4, T, Q> const& v1, - vec<4, T, Q> const& v2, - vec<4, T, Q> const& v3, - vec<4, T, Q> const& v4); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( - mat<4, 4, T, Q> const& m); - - //! Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( - mat<2, 2, T, Q> const& m); - - //! 
Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( - mat<3, 3, T, Q> const& m); - - //! Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( - vec<4, T, Q> const& v1, - vec<4, T, Q> const& v2, - vec<4, T, Q> const& v3, - vec<4, T, Q> const& v4); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( - mat<4, 4, T, Q> const& m); - - /// @} -}//namespace glm - -#include "matrix_major_storage.inl" diff --git a/third_party/glm/gtx/matrix_major_storage.inl b/third_party/glm/gtx/matrix_major_storage.inl deleted file mode 100755 index 279dd34..0000000 --- a/third_party/glm/gtx/matrix_major_storage.inl +++ /dev/null @@ -1,166 +0,0 @@ -/// @ref gtx_matrix_major_storage - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2 - ( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2 - ) - { - mat<2, 2, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2( - const mat<2, 2, T, Q>& m) - { - mat<2, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( - const vec<3, T, Q>& v1, - const vec<3, T, Q>& v2, - const vec<3, T, Q>& v3) - { - mat<3, 3, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[2][0] = v1.z; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - Result[2][1] = v2.z; - Result[0][2] = v3.x; - Result[1][2] = v3.y; - Result[2][2] = v3.z; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( - const mat<3, 3, T, Q>& m) - { - mat<3, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( - const vec<4, T, Q>& v1, - const vec<4, T, Q>& v2, - const vec<4, T, Q>& v3, - const vec<4, T, Q>& v4) - { - mat<4, 4, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[2][0] = v1.z; - Result[3][0] = v1.w; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - Result[2][1] = v2.z; - Result[3][1] = v2.w; - Result[0][2] = v3.x; - Result[1][2] = v3.y; - Result[2][2] = v3.z; - Result[3][2] = v3.w; - Result[0][3] = v4.x; - Result[1][3] = v4.y; - Result[2][3] = v4.z; - Result[3][3] = v4.w; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( - const mat<4, 4, T, Q>& m) - { - mat<4, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - Result[3][0] = m[0][3]; - 
Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - Result[3][3] = m[3][3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( - const vec<2, T, Q>& v1, - const vec<2, T, Q>& v2) - { - return mat<2, 2, T, Q>(v1, v2); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( - const mat<2, 2, T, Q>& m) - { - return mat<2, 2, T, Q>(m); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( - const vec<3, T, Q>& v1, - const vec<3, T, Q>& v2, - const vec<3, T, Q>& v3) - { - return mat<3, 3, T, Q>(v1, v2, v3); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( - const mat<3, 3, T, Q>& m) - { - return mat<3, 3, T, Q>(m); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( - const vec<4, T, Q>& v1, - const vec<4, T, Q>& v2, - const vec<4, T, Q>& v3, - const vec<4, T, Q>& v4) - { - return mat<4, 4, T, Q>(v1, v2, v3, v4); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( - const mat<4, 4, T, Q>& m) - { - return mat<4, 4, T, Q>(m); - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_operation.hpp b/third_party/glm/gtx/matrix_operation.hpp deleted file mode 100755 index de6ff1f..0000000 --- a/third_party/glm/gtx/matrix_operation.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/// @ref gtx_matrix_operation -/// @file glm/gtx/matrix_operation.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_operation GLM_GTX_matrix_operation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build diagonal matrices from vectors. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_operation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_operation extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_operation - /// @{ - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> diagonal2x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 3, T, Q> diagonal2x3( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 4, T, Q> diagonal2x4( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 2, T, Q> diagonal3x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> diagonal3x3( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 4, T, Q> diagonal3x4( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 2, T, Q> diagonal4x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 3, T, Q> diagonal4x3( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. 
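As the implementations further down in this patch show, each diagonalNxM builder starts from an identity-initialized matrix and copies the vector onto the main diagonal. A hedged usage sketch (the wrapper name axisScale is illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/matrix_operation.hpp>

    // diagonal3x3 writes v onto the main diagonal of an identity matrix,
    // which is exactly a per-axis scale.
    glm::mat3 axisScale(const glm::vec3& v)
    {
        return glm::diagonal3x3(v);   // mat3(v.x,0,0, 0,v.y,0, 0,0,v.z)
    }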
- template - GLM_FUNC_DECL mat<4, 4, T, Q> diagonal4x4( - vec<4, T, Q> const& v); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m); - - /// @} -}//namespace glm - -#include "matrix_operation.inl" diff --git a/third_party/glm/gtx/matrix_operation.inl b/third_party/glm/gtx/matrix_operation.inl deleted file mode 100755 index 9de83f8..0000000 --- a/third_party/glm/gtx/matrix_operation.inl +++ /dev/null @@ -1,176 +0,0 @@ -/// @ref gtx_matrix_operation - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> diagonal2x2 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> diagonal2x3 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> diagonal2x4 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> diagonal3x2 - ( - vec<2, T, Q> const& v - ) - { - mat<3, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> diagonal3x3 - ( - vec<3, T, Q> const& v - ) - { - mat<3, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> diagonal3x4 - ( - vec<3, T, Q> const& v - ) - { - mat<3, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> diagonal4x4 - ( - vec<4, T, Q> const& v - ) - { - mat<4, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - Result[3][3] = v[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> diagonal4x3 - ( - vec<3, T, Q> const& v - ) - { - mat<4, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> diagonal4x2 - ( - vec<2, T, Q> const& v - ) - { - mat<4, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - +m[1][1], -m[1][0], - -m[0][1], +m[0][0]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m) - { - T const m00 = determinant(mat<2, 2, T, Q>(m[1][1], m[2][1], m[1][2], m[2][2])); - T const m01 = determinant(mat<2, 2, T, Q>(m[0][1], m[2][1], m[0][2], m[2][2])); - T const m02 = determinant(mat<2, 2, T, Q>(m[0][1], m[1][1], m[0][2], m[1][2])); - - T const m10 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][2], m[2][2])); - T const m11 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][2], m[2][2])); - T const 
m12 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][2], m[1][2])); - - T const m20 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][1], m[2][1])); - T const m21 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][1], m[2][1])); - T const m22 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][1], m[1][1])); - - return mat<3, 3, T, Q>( - +m00, -m01, +m02, - -m10, +m11, -m12, - +m20, -m21, +m22); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m) - { - T const m00 = determinant(mat<3, 3, T, Q>(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); - T const m01 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); - T const m02 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][1], m[3][3])); - T const m03 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); - - T const m10 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); - T const m11 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); - T const m12 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); - T const m13 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); - - T const m20 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3])); - T const m21 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3])); - T const m22 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3])); - T const m23 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2])); - - T const m30 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3])); - T const m31 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3])); - T const m32 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3])); - T const m33 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2])); - - return mat<4, 4, T, Q>( - +m00, -m01, +m02, -m03, - -m10, +m11, -m12, +m13, - +m20, -m21, +m22, -m23, - -m30, +m31, -m32, +m33); - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_query.hpp b/third_party/glm/gtx/matrix_query.hpp deleted file mode 100755 index 8011b2b..0000000 --- a/third_party/glm/gtx/matrix_query.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/// @ref gtx_matrix_query -/// @file glm/gtx/matrix_query.hpp -/// -/// @see core (dependence) -/// @see gtx_vector_query (dependence) -/// -/// @defgroup gtx_matrix_query GLM_GTX_matrix_query -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Query to evaluate matrix properties - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/vector_query.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_query is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_matrix_query extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_matrix_query
-	/// @{
-
-	/// Return whether a matrix is a null matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is a null matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is a null matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is an identity matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q, template<length_t, length_t, typename, qualifier> class matType>
-	GLM_FUNC_DECL bool isIdentity(matType<4, 4, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is a normalized matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is a normalized matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is a normalized matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon);
-
-	/// Return whether a matrix is an orthonormalized matrix.
-	/// From GLM_GTX_matrix_query extension.
-	template<typename T, qualifier Q, template<length_t, length_t, typename, qualifier> class matType>
-	GLM_FUNC_DECL bool isOrthogonal(matType<4, 4, T, Q> const& m, T const& epsilon);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_query.inl"
diff --git a/third_party/glm/gtx/matrix_query.inl b/third_party/glm/gtx/matrix_query.inl
deleted file mode 100755
index 77bd231..0000000
--- a/third_party/glm/gtx/matrix_query.inl
+++ /dev/null
@@ -1,113 +0,0 @@
-/// @ref gtx_matrix_query
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon)
-	{
-		bool result = true;
-		for(length_t i = 0; result && i < m.length(); ++i)
-			result = isNull(m[i], epsilon);
-		return result;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon)
-	{
-		bool result = true;
-		for(length_t i = 0; result && i < m.length(); ++i)
-			result = isNull(m[i], epsilon);
-		return result;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon)
-	{
-		bool result = true;
-		for(length_t i = 0; result && i < m.length(); ++i)
-			result = isNull(m[i], epsilon);
-		return result;
-	}
-
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isIdentity(mat<C, R, T, Q> const& m, T const& epsilon)
-	{
-		bool result = true;
-		for(length_t i = 0; result && i < m[0].length(); ++i)
-		{
-			for(length_t j = 0; result && j < i; ++j)
-				result = abs(m[i][j]) <= epsilon;
-			if(result)
-				result = abs(m[i][i] - 1) <= epsilon;
-			for(length_t j = i + 1; result && j < m.length(); ++j)
-				result = abs(m[i][j]) <= epsilon;
-		}
-		return result;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon)
-	{
-		bool result(true);
-		for(length_t i = 0; result && i < m.length(); ++i)
-			result = isNormalized(m[i], epsilon);
-		for(length_t i = 0; result && i < m.length(); ++i)
-		{
-			typename mat<2, 2, T, Q>::col_type v;
-			for(length_t j = 0; j < m.length(); ++j)
-				v[j] = m[j][i];
-			result = isNormalized(v, epsilon);
-
} - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon) - { - bool result(true); - for(length_t i = 0; result && i < m.length(); ++i) - result = isNormalized(m[i], epsilon); - for(length_t i = 0; result && i < m.length(); ++i) - { - typename mat<3, 3, T, Q>::col_type v; - for(length_t j = 0; j < m.length(); ++j) - v[j] = m[j][i]; - result = isNormalized(v, epsilon); - } - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon) - { - bool result(true); - for(length_t i = 0; result && i < m.length(); ++i) - result = isNormalized(m[i], epsilon); - for(length_t i = 0; result && i < m.length(); ++i) - { - typename mat<4, 4, T, Q>::col_type v; - for(length_t j = 0; j < m.length(); ++j) - v[j] = m[j][i]; - result = isNormalized(v, epsilon); - } - return result; - } - - template - GLM_FUNC_QUALIFIER bool isOrthogonal(mat const& m, T const& epsilon) - { - bool result = true; - for(length_t i(0); result && i < m.length() - 1; ++i) - for(length_t j(i + 1); result && j < m.length(); ++j) - result = areOrthogonal(m[i], m[j], epsilon); - - if(result) - { - mat tmp = transpose(m); - for(length_t i(0); result && i < m.length() - 1 ; ++i) - for(length_t j(i + 1); result && j < m.length(); ++j) - result = areOrthogonal(tmp[i], tmp[j], epsilon); - } - return result; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_transform_2d.hpp b/third_party/glm/gtx/matrix_transform_2d.hpp deleted file mode 100755 index 5f9c540..0000000 --- a/third_party/glm/gtx/matrix_transform_2d.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/// @ref gtx_matrix_transform_2d -/// @file glm/gtx/matrix_transform_2d.hpp -/// @author Miguel Ãngel Pérez Martínez -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_transform_2d GLM_GTX_matrix_transform_2d -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines functions that generate common 2d transformation matrices. - -#pragma once - -// Dependency: -#include "../mat3x3.hpp" -#include "../vec2.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_transform_2d is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_transform_2d extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_transform_2d - /// @{ - - /// Builds a translation 3 * 3 matrix created from a vector of 2 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a translation vector. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v); - - /// Builds a rotation 3 * 3 matrix created from an angle. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param angle Rotation angle expressed in radians. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( - mat<3, 3, T, Q> const& m, - T angle); - - /// Builds a scale 3 * 3 matrix created from a vector of 2 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a scale vector. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v); - - /// Builds an horizontal (parallel to the x axis) shear 3 * 3 matrix. 
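The translate/rotate/scale helpers declared above compose 3x3 homogeneous matrices the same way the 4x4 core functions do: each one multiplies a new transform onto the right of the input matrix. A hedged composition sketch (function name and parameter names are illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/matrix_transform_2d.hpp>

    // Build translate * rotate * scale in homogeneous 2D (3x3) form; the
    // transforms apply right-to-left to column vectors.
    glm::mat3 makeTransform2D(const glm::vec2& t, float angleRad, const glm::vec2& s)
    {
        glm::mat3 m(1.0f);               // identity
        m = glm::translate(m, t);
        m = glm::rotate(m, angleRad);    // radians
        m = glm::scale(m, s);
        return m;
    }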
- /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param y Shear factor. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( - mat<3, 3, T, Q> const& m, - T y); - - /// Builds a vertical (parallel to the y axis) shear 3 * 3 matrix. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param x Shear factor. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( - mat<3, 3, T, Q> const& m, - T x); - - /// @} -}//namespace glm - -#include "matrix_transform_2d.inl" diff --git a/third_party/glm/gtx/matrix_transform_2d.inl b/third_party/glm/gtx/matrix_transform_2d.inl deleted file mode 100755 index a68d24d..0000000 --- a/third_party/glm/gtx/matrix_transform_2d.inl +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_matrix_transform_2d -/// @author Miguel Ãngel Pérez Martínez - -#include "../trigonometric.hpp" - -namespace glm -{ - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v) - { - mat<3, 3, T, Q> Result(m); - Result[2] = m[0] * v[0] + m[1] * v[1] + m[2]; - return Result; - } - - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( - mat<3, 3, T, Q> const& m, - T angle) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - mat<3, 3, T, Q> Result; - Result[0] = m[0] * c + m[1] * s; - Result[1] = m[0] * -s + m[1] * c; - Result[2] = m[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v) - { - mat<3, 3, T, Q> Result; - Result[0] = m[0] * v[0]; - Result[1] = m[1] * v[1]; - Result[2] = m[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( - mat<3, 3, T, Q> const& m, - T y) - { - mat<3, 3, T, Q> Result(1); - Result[0][1] = y; - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( - mat<3, 3, T, Q> const& m, - T x) - { - mat<3, 3, T, Q> Result(1); - Result[1][0] = x; - return m * Result; - } - -}//namespace glm diff --git a/third_party/glm/gtx/mixed_product.hpp b/third_party/glm/gtx/mixed_product.hpp deleted file mode 100755 index b242e35..0000000 --- a/third_party/glm/gtx/mixed_product.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/// @ref gtx_mixed_product -/// @file glm/gtx/mixed_product.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_mixed_product GLM_GTX_mixed_producte -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Mixed product of 3 vectors. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_mixed_product is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_mixed_product extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_mixed_product - /// @{ - - /// @brief Mixed product of 3 vectors (from GLM_GTX_mixed_product extension) - template - GLM_FUNC_DECL T mixedProduct( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - /// @} -}// namespace glm - -#include "mixed_product.inl" diff --git a/third_party/glm/gtx/mixed_product.inl b/third_party/glm/gtx/mixed_product.inl deleted file mode 100755 index e5cdbdb..0000000 --- a/third_party/glm/gtx/mixed_product.inl +++ /dev/null @@ -1,15 +0,0 @@ -/// @ref gtx_mixed_product - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T mixedProduct - ( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3 - ) - { - return dot(cross(v1, v2), v3); - } -}//namespace glm diff --git a/third_party/glm/gtx/norm.hpp b/third_party/glm/gtx/norm.hpp deleted file mode 100755 index dfaebb7..0000000 --- a/third_party/glm/gtx/norm.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/// @ref gtx_norm -/// @file glm/gtx/norm.hpp -/// -/// @see core (dependence) -/// @see gtx_quaternion (dependence) -/// @see gtx_component_wise (dependence) -/// -/// @defgroup gtx_norm GLM_GTX_norm -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Various ways to compute vector norms. - -#pragma once - -// Dependency: -#include "../geometric.hpp" -#include "../gtx/quaternion.hpp" -#include "../gtx/component_wise.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_norm is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_norm extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_norm - /// @{ - - /// Returns the squared length of x. - /// From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T length2(vec const& x); - - /// Returns the squared distance between p0 and p1, i.e., length2(p0 - p1). - /// From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T distance2(vec const& p0, vec const& p1); - - //! Returns the L1 norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the L1 norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& v); - - //! Returns the L2 norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the L2 norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x); - - //! Returns the L norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth); - - //! Returns the L norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, unsigned int Depth); - - //! Returns the LMax norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the LMax norm of v. - //! From GLM_GTX_norm extension. 
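The squared variants above (length2, distance2) exist to skip the square root, which is the usual reason to reach for this header: squared distances order the same way as distances. A minimal sketch, assuming GLM is on the include path:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/norm.hpp>

    // Compare squared distance against a squared radius: same result as
    // using distance(), but no sqrt is taken.
    bool withinRadius(const glm::vec3& p, const glm::vec3& q, float radius)
    {
        return glm::distance2(p, q) < radius * radius;
    }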
- template - GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x); - - /// @} -}//namespace glm - -#include "norm.inl" diff --git a/third_party/glm/gtx/norm.inl b/third_party/glm/gtx/norm.inl deleted file mode 100755 index 6db561b..0000000 --- a/third_party/glm/gtx/norm.inl +++ /dev/null @@ -1,95 +0,0 @@ -/// @ref gtx_norm - -#include "../detail/qualifier.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_length2 - { - GLM_FUNC_QUALIFIER static T call(vec const& v) - { - return dot(v, v); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER genType length2(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length2' accepts only floating-point inputs"); - return x * x; - } - - template - GLM_FUNC_QUALIFIER T length2(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length2' accepts only floating-point inputs"); - return detail::compute_length2::value>::call(v); - } - - template - GLM_FUNC_QUALIFIER T distance2(T p0, T p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance2' accepts only floating-point inputs"); - return length2(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T distance2(vec const& p0, vec const& p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance2' accepts only floating-point inputs"); - return length2(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - return abs(b.x - a.x) + abs(b.y - a.y) + abs(b.z - a.z); - } - - template - GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& v) - { - return abs(v.x) + abs(v.y) + abs(v.z); - } - - template - GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b - ) - { - return length(b - a); - } - - template - GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& v) - { - return length(v); - } - - template - GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth) - { - return pow(pow(abs(y.x - x.x), T(Depth)) + pow(abs(y.y - x.y), T(Depth)) + pow(abs(y.z - x.z), T(Depth)), T(1) / T(Depth)); - } - - template - GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& v, unsigned int Depth) - { - return pow(pow(abs(v.x), T(Depth)) + pow(abs(v.y), T(Depth)) + pow(abs(v.z), T(Depth)), T(1) / T(Depth)); - } - - template - GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - return compMax(abs(b - a)); - } - - template - GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& v) - { - return compMax(abs(v)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/normal.hpp b/third_party/glm/gtx/normal.hpp deleted file mode 100755 index 068682f..0000000 --- a/third_party/glm/gtx/normal.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/// @ref gtx_normal -/// @file glm/gtx/normal.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_normal GLM_GTX_normal -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Compute the normal of a triangle. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_normal is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_normal extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_normal
-	/// @{
-
-	/// Computes triangle normal from triangle points.
-	///
-	/// @see gtx_normal
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> triangleNormal(vec<3, T, Q> const& p1, vec<3, T, Q> const& p2, vec<3, T, Q> const& p3);
-
-	/// @}
-}//namespace glm
-
-#include "normal.inl"
diff --git a/third_party/glm/gtx/normal.inl b/third_party/glm/gtx/normal.inl
deleted file mode 100755
index 74f9fc9..0000000
--- a/third_party/glm/gtx/normal.inl
+++ /dev/null
@@ -1,15 +0,0 @@
-/// @ref gtx_normal
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> triangleNormal
-	(
-		vec<3, T, Q> const& p1,
-		vec<3, T, Q> const& p2,
-		vec<3, T, Q> const& p3
-	)
-	{
-		return normalize(cross(p1 - p2, p1 - p3));
-	}
-}//namespace glm
diff --git a/third_party/glm/gtx/normalize_dot.hpp b/third_party/glm/gtx/normalize_dot.hpp
deleted file mode 100755
index 5195802..0000000
--- a/third_party/glm/gtx/normalize_dot.hpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/// @ref gtx_normalize_dot
-/// @file glm/gtx/normalize_dot.hpp
-///
-/// @see core (dependence)
-/// @see gtx_fast_square_root (dependence)
-///
-/// @defgroup gtx_normalize_dot GLM_GTX_normalize_dot
-/// @ingroup gtx
-///
-/// Include to use the features of this extension.
-///
-/// Dot product of vectors that need to be normalized, computed with a single square root.
-
-#pragma once
-
-// Dependency:
-#include "../gtx/fast_square_root.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_normalize_dot is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_normalize_dot extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_normalize_dot
-	/// @{
-
-	/// Normalizes the parameters and returns the dot product of x and y.
-	/// It's faster than dot(normalize(x), normalize(y)).
-	///
-	/// @see gtx_normalize_dot extension.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL T normalizeDot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
-
-	/// Normalizes the parameters and returns the dot product of x and y.
-	/// Faster than dot(fastNormalize(x), fastNormalize(y)).
-	///
-	/// @see gtx_normalize_dot extension.
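As the .inl further below in this patch shows, normalizeDot evaluates dot(x, y) * inversesqrt(dot(x, x) * dot(y, y)): the cosine of the angle between the vectors, with one reciprocal square root instead of two normalizations. A hedged usage sketch (the wrapper name is illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/normalize_dot.hpp>

    // Cosine of the angle between two non-zero vectors, without
    // normalizing either one first.
    float cosAngle(const glm::vec3& a, const glm::vec3& b)
    {
        return glm::normalizeDot(a, b);   // == dot(normalize(a), normalize(b))
    }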
- template - GLM_FUNC_DECL T fastNormalizeDot(vec const& x, vec const& y); - - /// @} -}//namespace glm - -#include "normalize_dot.inl" diff --git a/third_party/glm/gtx/normalize_dot.inl b/third_party/glm/gtx/normalize_dot.inl deleted file mode 100755 index 7bcd9a5..0000000 --- a/third_party/glm/gtx/normalize_dot.inl +++ /dev/null @@ -1,16 +0,0 @@ -/// @ref gtx_normalize_dot - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T normalizeDot(vec const& x, vec const& y) - { - return glm::dot(x, y) * glm::inversesqrt(glm::dot(x, x) * glm::dot(y, y)); - } - - template - GLM_FUNC_QUALIFIER T fastNormalizeDot(vec const& x, vec const& y) - { - return glm::dot(x, y) * glm::fastInverseSqrt(glm::dot(x, x) * glm::dot(y, y)); - } -}//namespace glm diff --git a/third_party/glm/gtx/number_precision.hpp b/third_party/glm/gtx/number_precision.hpp deleted file mode 100755 index 3a606bd..0000000 --- a/third_party/glm/gtx/number_precision.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/// @ref gtx_number_precision -/// @file glm/gtx/number_precision.hpp -/// -/// @see core (dependence) -/// @see gtc_type_precision (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_number_precision GLM_GTX_number_precision -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defined size types. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/type_precision.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_number_precision is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_number_precision extension included") -# endif -#endif - -namespace glm{ -namespace gtx -{ - ///////////////////////////// - // Unsigned int vector types - - /// @addtogroup gtx_number_precision - /// @{ - - typedef u8 u8vec1; //!< \brief 8bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u16 u16vec1; //!< \brief 16bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u32 u32vec1; //!< \brief 32bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u64 u64vec1; //!< \brief 64bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - - ////////////////////// - // Float vector types - - typedef f32 f32vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - - ////////////////////// - // Float matrix types - - typedef f32 f32mat1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f32 f32mat1x1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64mat1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64mat1x1; //!< \brief Double-qualifier floating-point scalar. 
(from GLM_GTX_number_precision extension) - - /// @} -}//namespace gtx -}//namespace glm - -#include "number_precision.inl" diff --git a/third_party/glm/gtx/number_precision.inl b/third_party/glm/gtx/number_precision.inl deleted file mode 100755 index b39d71c..0000000 --- a/third_party/glm/gtx/number_precision.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtx_number_precision - -namespace glm -{ - -} diff --git a/third_party/glm/gtx/optimum_pow.hpp b/third_party/glm/gtx/optimum_pow.hpp deleted file mode 100755 index 9284a47..0000000 --- a/third_party/glm/gtx/optimum_pow.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/// @ref gtx_optimum_pow -/// @file glm/gtx/optimum_pow.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_optimum_pow GLM_GTX_optimum_pow -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Integer exponentiation of power functions. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_optimum_pow is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_optimum_pow extension included") -# endif -#endif - -namespace glm{ -namespace gtx -{ - /// @addtogroup gtx_optimum_pow - /// @{ - - /// Returns x raised to the power of 2. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow2(genType const& x); - - /// Returns x raised to the power of 3. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow3(genType const& x); - - /// Returns x raised to the power of 4. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow4(genType const& x); - - /// @} -}//namespace gtx -}//namespace glm - -#include "optimum_pow.inl" diff --git a/third_party/glm/gtx/optimum_pow.inl b/third_party/glm/gtx/optimum_pow.inl deleted file mode 100755 index a26c19c..0000000 --- a/third_party/glm/gtx/optimum_pow.inl +++ /dev/null @@ -1,22 +0,0 @@ -/// @ref gtx_optimum_pow - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType pow2(genType const& x) - { - return x * x; - } - - template - GLM_FUNC_QUALIFIER genType pow3(genType const& x) - { - return x * x * x; - } - - template - GLM_FUNC_QUALIFIER genType pow4(genType const& x) - { - return (x * x) * (x * x); - } -}//namespace glm diff --git a/third_party/glm/gtx/orthonormalize.hpp b/third_party/glm/gtx/orthonormalize.hpp deleted file mode 100755 index 3e004fb..0000000 --- a/third_party/glm/gtx/orthonormalize.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref gtx_orthonormalize -/// @file glm/gtx/orthonormalize.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_orthonormalize GLM_GTX_orthonormalize -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Orthonormalize matrices. - -#pragma once - -// Dependency: -#include "../vec3.hpp" -#include "../mat3x3.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_orthonormalize is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_orthonormalize extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_orthonormalize
-	/// @{
-
-	/// Returns the orthonormalized matrix of m.
-	///
-	/// @see gtx_orthonormalize
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m);
-
-	/// Orthonormalizes x according to y.
-	///
-	/// @see gtx_orthonormalize
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y);
-
-	/// @}
-}//namespace glm
-
-#include "orthonormalize.inl"
diff --git a/third_party/glm/gtx/orthonormalize.inl b/third_party/glm/gtx/orthonormalize.inl
deleted file mode 100755
index cb553ba..0000000
--- a/third_party/glm/gtx/orthonormalize.inl
+++ /dev/null
@@ -1,29 +0,0 @@
-/// @ref gtx_orthonormalize
-
-namespace glm
-{
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m)
-	{
-		mat<3, 3, T, Q> r = m;
-
-		r[0] = normalize(r[0]);
-
-		T d0 = dot(r[0], r[1]);
-		r[1] -= r[0] * d0;
-		r[1] = normalize(r[1]);
-
-		T d1 = dot(r[1], r[2]);
-		d0 = dot(r[0], r[2]);
-		r[2] -= r[0] * d0 + r[1] * d1;
-		r[2] = normalize(r[2]);
-
-		return r;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y)
-	{
-		return normalize(x - y * dot(y, x));
-	}
-}//namespace glm
diff --git a/third_party/glm/gtx/perpendicular.hpp b/third_party/glm/gtx/perpendicular.hpp
deleted file mode 100755
index 72b77b6..0000000
--- a/third_party/glm/gtx/perpendicular.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/// @ref gtx_perpendicular
-/// @file glm/gtx/perpendicular.hpp
-///
-/// @see core (dependence)
-/// @see gtx_projection (dependence)
-///
-/// @defgroup gtx_perpendicular GLM_GTX_perpendicular
-/// @ingroup gtx
-///
-/// Include to use the features of this extension.
-///
-/// Perpendicular of a vector with respect to another one
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-#include "../gtx/projection.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_perpendicular is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_perpendicular extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_perpendicular
-	/// @{
-
-	//! Projects x onto an axis perpendicular to Normal.
-	//! From GLM_GTX_perpendicular extension.
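perp(x, Normal) is simply what remains after the projection is removed, so x == proj(x, Normal) + perp(x, Normal). A common hedged use is sliding a velocity along a surface (the function name is illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/perpendicular.hpp>

    // Remove the component of velocity that points into the surface; what
    // remains is x - proj(x, normal), which is exactly glm::perp.
    glm::vec3 slideAlongSurface(const glm::vec3& velocity, const glm::vec3& normal)
    {
        return glm::perp(velocity, normal);
    }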
- template - GLM_FUNC_DECL genType perp(genType const& x, genType const& Normal); - - /// @} -}//namespace glm - -#include "perpendicular.inl" diff --git a/third_party/glm/gtx/perpendicular.inl b/third_party/glm/gtx/perpendicular.inl deleted file mode 100755 index 1e72f33..0000000 --- a/third_party/glm/gtx/perpendicular.inl +++ /dev/null @@ -1,10 +0,0 @@ -/// @ref gtx_perpendicular - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType perp(genType const& x, genType const& Normal) - { - return x - proj(x, Normal); - } -}//namespace glm diff --git a/third_party/glm/gtx/polar_coordinates.hpp b/third_party/glm/gtx/polar_coordinates.hpp deleted file mode 100755 index b399112..0000000 --- a/third_party/glm/gtx/polar_coordinates.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/// @ref gtx_polar_coordinates -/// @file glm/gtx/polar_coordinates.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_polar_coordinates GLM_GTX_polar_coordinates -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Conversion from Euclidean space to polar space and revert. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_polar_coordinates is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_polar_coordinates extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_polar_coordinates - /// @{ - - /// Convert Euclidean to Polar coordinates, x is the xz distance, y, the latitude and z the longitude. - /// - /// @see gtx_polar_coordinates - template - GLM_FUNC_DECL vec<3, T, Q> polar( - vec<3, T, Q> const& euclidean); - - /// Convert Polar to Euclidean coordinates. - /// - /// @see gtx_polar_coordinates - template - GLM_FUNC_DECL vec<3, T, Q> euclidean( - vec<2, T, Q> const& polar); - - /// @} -}//namespace glm - -#include "polar_coordinates.inl" diff --git a/third_party/glm/gtx/polar_coordinates.inl b/third_party/glm/gtx/polar_coordinates.inl deleted file mode 100755 index 371c8dd..0000000 --- a/third_party/glm/gtx/polar_coordinates.inl +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_polar_coordinates - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> polar - ( - vec<3, T, Q> const& euclidean - ) - { - T const Length(length(euclidean)); - vec<3, T, Q> const tmp(euclidean / Length); - T const xz_dist(sqrt(tmp.x * tmp.x + tmp.z * tmp.z)); - - return vec<3, T, Q>( - asin(tmp.y), // latitude - atan(tmp.x, tmp.z), // longitude - xz_dist); // xz distance - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> euclidean - ( - vec<2, T, Q> const& polar - ) - { - T const latitude(polar.x); - T const longitude(polar.y); - - return vec<3, T, Q>( - cos(latitude) * sin(longitude), - sin(latitude), - cos(latitude) * cos(longitude)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/projection.hpp b/third_party/glm/gtx/projection.hpp deleted file mode 100755 index 678f3ad..0000000 --- a/third_party/glm/gtx/projection.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/// @ref gtx_projection -/// @file glm/gtx/projection.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_projection GLM_GTX_projection -/// @ingroup gtx -/// -/// Include to use the features of this extension. 
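The projection extension that follows scales Normal by dot(x, Normal) / dot(Normal, Normal), so the axis does not need to be unit length. A minimal sketch, assuming GLM is on the include path (the wrapper name is illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/projection.hpp>

    // Component of x along an arbitrary (possibly non-unit) axis,
    // returned as a vector.
    glm::vec3 alongAxis(const glm::vec3& x, const glm::vec3& axis)
    {
        return glm::proj(x, axis);   // dot(x, axis) / dot(axis, axis) * axis
    }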
-/// -/// Projection of a vector to other one - -#pragma once - -// Dependency: -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_projection is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_projection extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_projection - /// @{ - - /// Projects x on Normal. - /// - /// @param[in] x A vector to project - /// @param[in] Normal A normal that doesn't need to be of unit length. - /// - /// @see gtx_projection - template<typename genType> - GLM_FUNC_DECL genType proj(genType const& x, genType const& Normal); - - /// @} -}//namespace glm - -#include "projection.inl" diff --git a/third_party/glm/gtx/projection.inl b/third_party/glm/gtx/projection.inl deleted file mode 100755 index f23f884..0000000 --- a/third_party/glm/gtx/projection.inl +++ /dev/null @@ -1,10 +0,0 @@ -/// @ref gtx_projection - -namespace glm -{ - template<typename genType> - GLM_FUNC_QUALIFIER genType proj(genType const& x, genType const& Normal) - { - return glm::dot(x, Normal) / glm::dot(Normal, Normal) * Normal; - } -}//namespace glm diff --git a/third_party/glm/gtx/quaternion.hpp b/third_party/glm/gtx/quaternion.hpp deleted file mode 100755 index 5c2b5ad..0000000 --- a/third_party/glm/gtx/quaternion.hpp +++ /dev/null @@ -1,174 +0,0 @@ -/// @ref gtx_quaternion -/// @file glm/gtx/quaternion.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_quaternion GLM_GTX_quaternion -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extented quaternion types and functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../gtc/quaternion.hpp" -#include "../ext/quaternion_exponential.hpp" -#include "../gtx/norm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_quaternion extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_quaternion - /// @{ - - /// Create an identity quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR qua<T, Q> quat_identity(); - - /// Compute a cross product between a quaternion and a vector. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> cross( - qua<T, Q> const& q, - vec<3, T, Q> const& v); - - //! Compute a cross product between a vector and a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> cross( - vec<3, T, Q> const& v, - qua<T, Q> const& q); - - //! Compute a point on a path according squad equation. - //! q1 and q2 are control points; s1 and s2 are intermediate control points. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> squad( - qua<T, Q> const& q1, - qua<T, Q> const& q2, - qua<T, Q> const& s1, - qua<T, Q> const& s2, - T const& h); - - //! Returns an intermediate control point for squad interpolation. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> intermediate( - qua<T, Q> const& prev, - qua<T, Q> const& curr, - qua<T, Q> const& next); - - //! Returns quarternion square root.
- /// - /// @see gtx_quaternion - //template<typename T, qualifier Q> - //qua<T, Q> sqrt( - // qua<T, Q> const& q); - - //! Rotates a 3 components vector by a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL vec<3, T, Q> rotate( - qua<T, Q> const& q, - vec<3, T, Q> const& v); - - /// Rotates a 4 components vector by a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL vec<4, T, Q> rotate( - qua<T, Q> const& q, - vec<4, T, Q> const& v); - - /// Extract the real component of a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL T extractRealComponent( - qua<T, Q> const& q); - - /// Converts a quaternion to a 3 * 3 matrix. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<3, 3, T, Q> toMat3( - qua<T, Q> const& x){return mat3_cast(x);} - - /// Converts a quaternion to a 4 * 4 matrix. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> toMat4( - qua<T, Q> const& x){return mat4_cast(x);} - - /// Converts a 3 * 3 matrix to a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> toQuat( - mat<3, 3, T, Q> const& x){return quat_cast(x);} - - /// Converts a 4 * 4 matrix to a quaternion. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> toQuat( - mat<4, 4, T, Q> const& x){return quat_cast(x);} - - /// Quaternion interpolation using the rotation short path. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> shortMix( - qua<T, Q> const& x, - qua<T, Q> const& y, - T const& a); - - /// Quaternion normalized linear interpolation. - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> fastMix( - qua<T, Q> const& x, - qua<T, Q> const& y, - T const& a); - - /// Compute the rotation between two vectors. - /// @param orig vector, needs to be normalized - /// @param dest vector, needs to be normalized - /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL qua<T, Q> rotation( - vec<3, T, Q> const& orig, - vec<3, T, Q> const& dest); - - /// Returns the squared length of x.
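/// (Usage sketch, not part of the original header: length2 avoids the sqrt taken by
/// length(), e.g. `if(glm::length2(q) > T(0)) q = glm::normalize(q);`.)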
- /// - /// @see gtx_quaternion - template<typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR T length2(qua<T, Q> const& q); - - /// @} -}//namespace glm - -#include "quaternion.inl" diff --git a/third_party/glm/gtx/quaternion.inl b/third_party/glm/gtx/quaternion.inl deleted file mode 100755 index d125bcc..0000000 --- a/third_party/glm/gtx/quaternion.inl +++ /dev/null @@ -1,159 +0,0 @@ -/// @ref gtx_quaternion - -#include <limits> -#include "../gtc/constants.hpp" - -namespace glm -{ - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> quat_identity() - { - return qua<T, Q>(static_cast<T>(1), static_cast<T>(0), static_cast<T>(0), static_cast<T>(0)); - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(vec<3, T, Q> const& v, qua<T, Q> const& q) - { - return inverse(q) * v; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(qua<T, Q> const& q, vec<3, T, Q> const& v) - { - return q * v; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER qua<T, Q> squad - ( - qua<T, Q> const& q1, - qua<T, Q> const& q2, - qua<T, Q> const& s1, - qua<T, Q> const& s2, - T const& h) - { - return mix(mix(q1, q2, h), mix(s1, s2, h), static_cast<T>(2) * (static_cast<T>(1) - h) * h); - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER qua<T, Q> intermediate - ( - qua<T, Q> const& prev, - qua<T, Q> const& curr, - qua<T, Q> const& next - ) - { - qua<T, Q> invQuat = inverse(curr); - return exp((log(next * invQuat) + log(prev * invQuat)) / static_cast<T>(-4)) * curr; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<3, T, Q> rotate(qua<T, Q> const& q, vec<3, T, Q> const& v) - { - return q * v; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER vec<4, T, Q> rotate(qua<T, Q> const& q, vec<4, T, Q> const& v) - { - return q * v; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER T extractRealComponent(qua<T, Q> const& q) - { - T w = static_cast<T>(1) - q.x * q.x - q.y * q.y - q.z * q.z; - if(w < T(0)) - return T(0); - else - return -sqrt(w); - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T length2(qua<T, Q> const& q) - { - return q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER qua<T, Q> shortMix(qua<T, Q> const& x, qua<T, Q> const& y, T const& a) - { - if(a <= static_cast<T>(0)) return x; - if(a >= static_cast<T>(1)) return y; - - T fCos = dot(x, y); - qua<T, Q> y2(y); //BUG!!! qua<T, Q> y2; - if(fCos < static_cast<T>(0)) - { - y2 = -y; - fCos = -fCos; - } - - //if(fCos > 1.0f) // problem - T k0, k1; - if(fCos > (static_cast<T>(1) - epsilon<T>())) - { - k0 = static_cast<T>(1) - a; - k1 = static_cast<T>(0) + a; //BUG!!! 1.0f + a; - } - else - { - T fSin = sqrt(T(1) - fCos * fCos); - T fAngle = atan(fSin, fCos); - T fOneOverSin = static_cast<T>(1) / fSin; - k0 = sin((static_cast<T>(1) - a) * fAngle) * fOneOverSin; - k1 = sin((static_cast<T>(0) + a) * fAngle) * fOneOverSin; - } - - return qua<T, Q>( - k0 * x.w + k1 * y2.w, - k0 * x.x + k1 * y2.x, - k0 * x.y + k1 * y2.y, - k0 * x.z + k1 * y2.z); - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER qua<T, Q> fastMix(qua<T, Q> const& x, qua<T, Q> const& y, T const& a) - { - return glm::normalize(x * (static_cast<T>(1) - a) + (y * a)); - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER qua<T, Q> rotation(vec<3, T, Q> const& orig, vec<3, T, Q> const& dest) - { - T cosTheta = dot(orig, dest); - vec<3, T, Q> rotationAxis; - - if(cosTheta >= static_cast<T>(1) - epsilon<T>()) { - // orig and dest point in the same direction - return quat_identity<T, Q>(); - } - - if(cosTheta < static_cast<T>(-1) + epsilon<T>()) - { - // special case when vectors in opposite directions : - // there is no "ideal" rotation axis - // So guess one; any will do as long as it's perpendicular to start - // This implementation favors a rotation around the Up axis (Y), - // since it's often what you want to do.
- rotationAxis = cross(vec<3, T, Q>(0, 0, 1), orig); - if(length2(rotationAxis) < epsilon<T>()) // bad luck, they were parallel, try again! - rotationAxis = cross(vec<3, T, Q>(1, 0, 0), orig); - - rotationAxis = normalize(rotationAxis); - return angleAxis(pi<T>(), rotationAxis); - } - - // Implementation from Stan Melax's Game Programming Gems 1 article - rotationAxis = cross(orig, dest); - - T s = sqrt((T(1) + cosTheta) * static_cast<T>(2)); - T invs = static_cast<T>(1) / s; - - return qua<T, Q>( - s * static_cast<T>(0.5f), - rotationAxis.x * invs, - rotationAxis.y * invs, - rotationAxis.z * invs); - } -}//namespace glm diff --git a/third_party/glm/gtx/range.hpp b/third_party/glm/gtx/range.hpp deleted file mode 100755 index 93bcb9a..0000000 --- a/third_party/glm/gtx/range.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/// @ref gtx_range -/// @file glm/gtx/range.hpp -/// @author Joshua Moerman -/// -/// @defgroup gtx_range GLM_GTX_range -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines begin and end for vectors and matrices. Useful for range-based for loop. -/// The range is defined over the elements, not over columns or rows (e.g. mat4 has 16 elements). - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_range is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_range extension included") -# endif -#endif - -#include "../gtc/type_ptr.hpp" -#include "../gtc/vec1.hpp" - -namespace glm -{ - /// @addtogroup gtx_range - /// @{ - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable : 4100) // unreferenced formal parameter -# endif - - template<typename T, qualifier Q> - inline length_t components(vec<1, T, Q> const& v) - { - return v.length(); - } - - template<typename T, qualifier Q> - inline length_t components(vec<2, T, Q> const& v) - { - return v.length(); - } - - template<typename T, qualifier Q> - inline length_t components(vec<3, T, Q> const& v) - { - return v.length(); - } - - template<typename T, qualifier Q> - inline length_t components(vec<4, T, Q> const& v) - { - return v.length(); - } - - template<typename genType> - inline length_t components(genType const& m) - { - return m.length() * m[0].length(); - } - - template<typename genType> - inline typename genType::value_type const * begin(genType const& v) - { - return value_ptr(v); - } - - template<typename genType> - inline typename genType::value_type const * end(genType const& v) - { - return begin(v) + components(v); - } - - template<typename genType> - inline typename genType::value_type * begin(genType& v) - { - return value_ptr(v); - } - - template<typename genType> - inline typename genType::value_type * end(genType& v) - { - return begin(v) + components(v); - } - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif - - /// @} -}//namespace glm diff --git a/third_party/glm/gtx/raw_data.hpp b/third_party/glm/gtx/raw_data.hpp deleted file mode 100755 index 86cbe77..0000000 --- a/third_party/glm/gtx/raw_data.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/// @ref gtx_raw_data -/// @file glm/gtx/raw_data.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_raw_data GLM_GTX_raw_data -/// @ingroup gtx -/// -/// Include to use the features of this extension.
-/// -/// Projection of a vector to other one - -#pragma once - -// Dependencies -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_raw_data is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_raw_data extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_raw_data - /// @{ - - //! Type for byte numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint8 byte; - - //! Type for word numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint16 word; - - //! Type for dword numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint32 dword; - - //! Type for qword numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint64 qword; - - /// @} -}// namespace glm - -#include "raw_data.inl" diff --git a/third_party/glm/gtx/raw_data.inl b/third_party/glm/gtx/raw_data.inl deleted file mode 100755 index c740317..0000000 --- a/third_party/glm/gtx/raw_data.inl +++ /dev/null @@ -1,2 +0,0 @@ -/// @ref gtx_raw_data - diff --git a/third_party/glm/gtx/rotate_normalized_axis.hpp b/third_party/glm/gtx/rotate_normalized_axis.hpp deleted file mode 100755 index 2103ca0..0000000 --- a/third_party/glm/gtx/rotate_normalized_axis.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_rotate_normalized_axis -/// @file glm/gtx/rotate_normalized_axis.hpp -/// -/// @see core (dependence) -/// @see gtc_matrix_transform -/// @see gtc_quaternion -/// -/// @defgroup gtx_rotate_normalized_axis GLM_GTX_rotate_normalized_axis -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Quaternions and matrices rotations around normalized axis. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/epsilon.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_rotate_normalized_axis is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_rotate_normalized_axis extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_rotate_normalized_axis - /// @{ - - /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. - /// - /// @param m Input matrix multiplied by this rotation matrix. - /// @param angle Rotation angle expressed in radians. - /// @param axis Rotation axis, must be normalized. - /// @tparam T Value type used to build the matrix. Currently supported: half (not recommended), float or double. - /// - /// @see gtx_rotate_normalized_axis - /// @see - rotate(T angle, T x, T y, T z) - /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) - /// @see - rotate(T angle, vec<3, T, Q> const& v) - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotateNormalizedAxis( - mat<4, 4, T, Q> const& m, - T const& angle, - vec<3, T, Q> const& axis); - - /// Rotates a quaternion from a vector of 3 components normalized axis and an angle. - /// - /// @param q Source orientation - /// @param angle Angle expressed in radians. - /// @param axis Normalized axis of the rotation, must be normalized. 
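/// (Usage sketch, not part of the original header: callers are expected to feed a
/// pre-normalized axis, e.g. `q = glm::rotateNormalizedAxis(q, angle, glm::normalize(axis));`,
/// skipping the normalization step of the plain glm::rotate overload.)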
- /// - /// @see gtx_rotate_normalized_axis - template - GLM_FUNC_DECL qua rotateNormalizedAxis( - qua const& q, - T const& angle, - vec<3, T, Q> const& axis); - - /// @} -}//namespace glm - -#include "rotate_normalized_axis.inl" diff --git a/third_party/glm/gtx/rotate_normalized_axis.inl b/third_party/glm/gtx/rotate_normalized_axis.inl deleted file mode 100755 index b2e9278..0000000 --- a/third_party/glm/gtx/rotate_normalized_axis.inl +++ /dev/null @@ -1,58 +0,0 @@ -/// @ref gtx_rotate_normalized_axis - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotateNormalizedAxis - ( - mat<4, 4, T, Q> const& m, - T const& angle, - vec<3, T, Q> const& v - ) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - vec<3, T, Q> const axis(v); - - vec<3, T, Q> const temp((static_cast(1) - c) * axis); - - mat<4, 4, T, Q> Rotate; - Rotate[0][0] = c + temp[0] * axis[0]; - Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; - Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; - - Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; - Rotate[1][1] = c + temp[1] * axis[1]; - Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; - - Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; - Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; - Rotate[2][2] = c + temp[2] * axis[2]; - - mat<4, 4, T, Q> Result; - Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER qua rotateNormalizedAxis - ( - qua const& q, - T const& angle, - vec<3, T, Q> const& v - ) - { - vec<3, T, Q> const Tmp(v); - - T const AngleRad(angle); - T const Sin = sin(AngleRad * T(0.5)); - - return q * qua(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); - //return gtc::quaternion::cross(q, tquat(cos(AngleRad * T(0.5)), Tmp.x * fSin, Tmp.y * fSin, Tmp.z * fSin)); - } -}//namespace glm diff --git a/third_party/glm/gtx/rotate_vector.hpp b/third_party/glm/gtx/rotate_vector.hpp deleted file mode 100755 index dcd5b95..0000000 --- a/third_party/glm/gtx/rotate_vector.hpp +++ /dev/null @@ -1,123 +0,0 @@ -/// @ref gtx_rotate_vector -/// @file glm/gtx/rotate_vector.hpp -/// -/// @see core (dependence) -/// @see gtx_transform (dependence) -/// -/// @defgroup gtx_rotate_vector GLM_GTX_rotate_vector -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Function to directly rotate a vector - -#pragma once - -// Dependency: -#include "../gtx/transform.hpp" -#include "../gtc/epsilon.hpp" -#include "../ext/vector_relational.hpp" -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_rotate_vector is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_rotate_vector extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_rotate_vector - /// @{ - - /// Returns Spherical interpolation between two vectors - /// - /// @param x A first vector - /// @param y A second vector - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. 
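/// (Usage sketch, not part of the original header: both endpoints should be direction
/// vectors, e.g. `glm::vec3 d = glm::slerp(glm::vec3(1, 0, 0), glm::vec3(0, 1, 0), 0.5f);`
/// yields the unit direction halfway between the two axes.)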
- /// - /// @see gtx_rotate_vector - template - GLM_FUNC_DECL vec<3, T, Q> slerp( - vec<3, T, Q> const& x, - vec<3, T, Q> const& y, - T const& a); - - //! Rotate a two dimensional vector. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<2, T, Q> rotate( - vec<2, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around an axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotate( - vec<3, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal); - - //! Rotate a four dimensional vector around an axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotate( - vec<4, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal); - - //! Rotate a three dimensional vector around the X axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateX( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around the Y axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateY( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around the Z axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateZ( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the X axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateX( - vec<4, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the Y axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateY( - vec<4, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the Z axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateZ( - vec<4, T, Q> const& v, - T const& angle); - - //! Build a rotation matrix from a normal and a up vector. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> orientation( - vec<3, T, Q> const& Normal, - vec<3, T, Q> const& Up); - - /// @} -}//namespace glm - -#include "rotate_vector.inl" diff --git a/third_party/glm/gtx/rotate_vector.inl b/third_party/glm/gtx/rotate_vector.inl deleted file mode 100755 index f8136e7..0000000 --- a/third_party/glm/gtx/rotate_vector.inl +++ /dev/null @@ -1,187 +0,0 @@ -/// @ref gtx_rotate_vector - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> slerp - ( - vec<3, T, Q> const& x, - vec<3, T, Q> const& y, - T const& a - ) - { - // get cosine of angle between vectors (-1 -> 1) - T CosAlpha = dot(x, y); - // get angle (0 -> pi) - T Alpha = acos(CosAlpha); - // get sine of angle between vectors (0 -> 1) - T SinAlpha = sin(Alpha); - // this breaks down when SinAlpha = 0, i.e. 
Alpha = 0 or pi - T t1 = sin((static_cast(1) - a) * Alpha) / SinAlpha; - T t2 = sin(a * Alpha) / SinAlpha; - - // interpolate src vectors - return x * t1 + y * t2; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> rotate - ( - vec<2, T, Q> const& v, - T const& angle - ) - { - vec<2, T, Q> Result; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotate - ( - vec<3, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal - ) - { - return mat<3, 3, T, Q>(glm::rotate(angle, normal)) * v; - } - /* - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateGTX( - const vec<3, T, Q>& x, - T angle, - const vec<3, T, Q>& normal) - { - const T Cos = cos(radians(angle)); - const T Sin = sin(radians(angle)); - return x * Cos + ((x * normal) * (T(1) - Cos)) * normal + cross(x, normal) * Sin; - } - */ - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotate - ( - vec<4, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal - ) - { - return rotate(angle, normal) * v; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateX - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result(v); - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.y = v.y * Cos - v.z * Sin; - Result.z = v.y * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateY - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos + v.z * Sin; - Result.z = -v.x * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateZ - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateX - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.y = v.y * Cos - v.z * Sin; - Result.z = v.y * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateY - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos + v.z * Sin; - Result.z = -v.x * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateZ - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientation - ( - vec<3, T, Q> const& Normal, - vec<3, T, Q> const& Up - ) - { - if(all(equal(Normal, Up, epsilon()))) - return mat<4, 4, T, Q>(static_cast(1)); - - vec<3, T, Q> RotationAxis = cross(Up, Normal); - T Angle = acos(dot(Normal, Up)); - - return rotate(Angle, RotationAxis); - } -}//namespace glm diff --git a/third_party/glm/gtx/scalar_multiplication.hpp b/third_party/glm/gtx/scalar_multiplication.hpp deleted file mode 100755 index 496ba19..0000000 --- a/third_party/glm/gtx/scalar_multiplication.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/// @ref gtx -/// @file glm/gtx/scalar_multiplication.hpp -/// @author Joshua Moerman -/// -/// Include to use the features 
of this extension. -/// -/// Enables scalar multiplication for all types -/// -/// Since GLSL is very strict about types, the following (often used) combinations do not work: -/// double * vec4 -/// int * vec4 -/// vec4 / int -/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic) - -#pragma once - -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_scalar_multiplication extension included") -# endif -#endif - -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../mat2x2.hpp" -#include - -namespace glm -{ - template - using return_type_scalar_multiplication = typename std::enable_if< - !std::is_same::value // T may not be a float - && std::is_arithmetic::value, Vec // But it may be an int or double (no vec3 or mat3, ...) - >::type; - -#define GLM_IMPLEMENT_SCAL_MULT(Vec) \ - template \ - return_type_scalar_multiplication \ - operator*(T const& s, Vec rh){ \ - return rh *= static_cast(s); \ - } \ - \ - template \ - return_type_scalar_multiplication \ - operator*(Vec lh, T const& s){ \ - return lh *= static_cast(s); \ - } \ - \ - template \ - return_type_scalar_multiplication \ - operator/(Vec lh, T const& s){ \ - return lh *= 1.0f / static_cast(s); \ - } - -GLM_IMPLEMENT_SCAL_MULT(vec2) -GLM_IMPLEMENT_SCAL_MULT(vec3) -GLM_IMPLEMENT_SCAL_MULT(vec4) - -GLM_IMPLEMENT_SCAL_MULT(mat2) -GLM_IMPLEMENT_SCAL_MULT(mat2x3) -GLM_IMPLEMENT_SCAL_MULT(mat2x4) -GLM_IMPLEMENT_SCAL_MULT(mat3x2) -GLM_IMPLEMENT_SCAL_MULT(mat3) -GLM_IMPLEMENT_SCAL_MULT(mat3x4) -GLM_IMPLEMENT_SCAL_MULT(mat4x2) -GLM_IMPLEMENT_SCAL_MULT(mat4x3) -GLM_IMPLEMENT_SCAL_MULT(mat4) - -#undef GLM_IMPLEMENT_SCAL_MULT -} // namespace glm diff --git a/third_party/glm/gtx/scalar_relational.hpp b/third_party/glm/gtx/scalar_relational.hpp deleted file mode 100755 index 8be9c57..0000000 --- a/third_party/glm/gtx/scalar_relational.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_scalar_relational -/// @file glm/gtx/scalar_relational.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_scalar_relational GLM_GTX_scalar_relational -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extend a position from a source to a position at a defined length. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extend extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_scalar_relational - /// @{ - - - - /// @} -}//namespace glm - -#include "scalar_relational.inl" diff --git a/third_party/glm/gtx/scalar_relational.inl b/third_party/glm/gtx/scalar_relational.inl deleted file mode 100755 index c2a121c..0000000 --- a/third_party/glm/gtx/scalar_relational.inl +++ /dev/null @@ -1,88 +0,0 @@ -/// @ref gtx_scalar_relational - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool lessThan - ( - T const& x, - T const& y - ) - { - return x < y; - } - - template - GLM_FUNC_QUALIFIER bool lessThanEqual - ( - T const& x, - T const& y - ) - { - return x <= y; - } - - template - GLM_FUNC_QUALIFIER bool greaterThan - ( - T const& x, - T const& y - ) - { - return x > y; - } - - template - GLM_FUNC_QUALIFIER bool greaterThanEqual - ( - T const& x, - T const& y - ) - { - return x >= y; - } - - template - GLM_FUNC_QUALIFIER bool equal - ( - T const& x, - T const& y - ) - { - return detail::compute_equal::is_iec559>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER bool notEqual - ( - T const& x, - T const& y - ) - { - return !detail::compute_equal::is_iec559>::call(x, y); - } - - GLM_FUNC_QUALIFIER bool any - ( - bool const& x - ) - { - return x; - } - - GLM_FUNC_QUALIFIER bool all - ( - bool const& x - ) - { - return x; - } - - GLM_FUNC_QUALIFIER bool not_ - ( - bool const& x - ) - { - return !x; - } -}//namespace glm diff --git a/third_party/glm/gtx/spline.hpp b/third_party/glm/gtx/spline.hpp deleted file mode 100755 index 731c979..0000000 --- a/third_party/glm/gtx/spline.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/// @ref gtx_spline -/// @file glm/gtx/spline.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_spline GLM_GTX_spline -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Spline functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/optimum_pow.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_spline is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_spline extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_spline - /// @{ - - /// Return a point from a catmull rom curve. - /// @see gtx_spline extension. - template - GLM_FUNC_DECL genType catmullRom( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s); - - /// Return a point from a hermite curve. - /// @see gtx_spline extension. - template - GLM_FUNC_DECL genType hermite( - genType const& v1, - genType const& t1, - genType const& v2, - genType const& t2, - typename genType::value_type const& s); - - /// Return a point from a cubic curve. - /// @see gtx_spline extension. 
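/// (Usage sketch, not part of the original header: each spline takes four control
/// values and a parameter s in [0, 1], e.g.
///     glm::vec3 p = glm::catmullRom(p0, p1, p2, p3, 0.25f);
/// for hypothetical control points p0..p3; the Catmull-Rom segment runs from p1 to p2.)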
- template - GLM_FUNC_DECL genType cubic( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s); - - /// @} -}//namespace glm - -#include "spline.inl" diff --git a/third_party/glm/gtx/spline.inl b/third_party/glm/gtx/spline.inl deleted file mode 100755 index c3fd056..0000000 --- a/third_party/glm/gtx/spline.inl +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_spline - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType catmullRom - ( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s - ) - { - typename genType::value_type s2 = pow2(s); - typename genType::value_type s3 = pow3(s); - - typename genType::value_type f1 = -s3 + typename genType::value_type(2) * s2 - s; - typename genType::value_type f2 = typename genType::value_type(3) * s3 - typename genType::value_type(5) * s2 + typename genType::value_type(2); - typename genType::value_type f3 = typename genType::value_type(-3) * s3 + typename genType::value_type(4) * s2 + s; - typename genType::value_type f4 = s3 - s2; - - return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) / typename genType::value_type(2); - - } - - template - GLM_FUNC_QUALIFIER genType hermite - ( - genType const& v1, - genType const& t1, - genType const& v2, - genType const& t2, - typename genType::value_type const& s - ) - { - typename genType::value_type s2 = pow2(s); - typename genType::value_type s3 = pow3(s); - - typename genType::value_type f1 = typename genType::value_type(2) * s3 - typename genType::value_type(3) * s2 + typename genType::value_type(1); - typename genType::value_type f2 = typename genType::value_type(-2) * s3 + typename genType::value_type(3) * s2; - typename genType::value_type f3 = s3 - typename genType::value_type(2) * s2 + s; - typename genType::value_type f4 = s3 - s2; - - return f1 * v1 + f2 * v2 + f3 * t1 + f4 * t2; - } - - template - GLM_FUNC_QUALIFIER genType cubic - ( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s - ) - { - return ((v1 * s + v2) * s + v3) * s + v4; - } -}//namespace glm diff --git a/third_party/glm/gtx/std_based_type.hpp b/third_party/glm/gtx/std_based_type.hpp deleted file mode 100755 index cd3be8c..0000000 --- a/third_party/glm/gtx/std_based_type.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_std_based_type -/// @file glm/gtx/std_based_type.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_std_based_type GLM_GTX_std_based_type -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Adds vector types based on STL value types. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_std_based_type is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_std_based_type extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_std_based_type - /// @{ - - /// Vector type based of one std::size_t component. - /// @see GLM_GTX_std_based_type - typedef vec<1, std::size_t, defaultp> size1; - - /// Vector type based of two std::size_t components. 
- /// @see GLM_GTX_std_based_type - typedef vec<2, std::size_t, defaultp> size2; - - /// Vector type based of three std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<3, std::size_t, defaultp> size3; - - /// Vector type based of four std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<4, std::size_t, defaultp> size4; - - /// Vector type based of one std::size_t component. - /// @see GLM_GTX_std_based_type - typedef vec<1, std::size_t, defaultp> size1_t; - - /// Vector type based of two std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<2, std::size_t, defaultp> size2_t; - - /// Vector type based of three std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<3, std::size_t, defaultp> size3_t; - - /// Vector type based of four std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<4, std::size_t, defaultp> size4_t; - - /// @} -}//namespace glm - -#include "std_based_type.inl" diff --git a/third_party/glm/gtx/std_based_type.inl b/third_party/glm/gtx/std_based_type.inl deleted file mode 100755 index 9c34bdb..0000000 --- a/third_party/glm/gtx/std_based_type.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtx_std_based_type - -namespace glm -{ - -} diff --git a/third_party/glm/gtx/string_cast.hpp b/third_party/glm/gtx/string_cast.hpp deleted file mode 100755 index 27846bf..0000000 --- a/third_party/glm/gtx/string_cast.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/// @ref gtx_string_cast -/// @file glm/gtx/string_cast.hpp -/// -/// @see core (dependence) -/// @see gtx_integer (dependence) -/// @see gtx_quaternion (dependence) -/// -/// @defgroup gtx_string_cast GLM_GTX_string_cast -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Setup strings for GLM type values -/// -/// This extension is not supported with CUDA - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/type_precision.hpp" -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" -#include -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_string_cast is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_string_cast extension included") -# endif -#endif - -#if(GLM_COMPILER & GLM_COMPILER_CUDA) -# error "GLM_GTX_string_cast is not supported on CUDA compiler" -#endif - -namespace glm -{ - /// @addtogroup gtx_string_cast - /// @{ - - /// Create a string from a GLM vector or matrix typed variable. - /// @see gtx_string_cast extension. - template - GLM_FUNC_DECL std::string to_string(genType const& x); - - /// @} -}//namespace glm - -#include "string_cast.inl" diff --git a/third_party/glm/gtx/string_cast.inl b/third_party/glm/gtx/string_cast.inl deleted file mode 100755 index f67751d..0000000 --- a/third_party/glm/gtx/string_cast.inl +++ /dev/null @@ -1,492 +0,0 @@ -/// @ref gtx_string_cast - -#include -#include - -namespace glm{ -namespace detail -{ - template - struct cast - { - typedef T value_type; - }; - - template <> - struct cast - { - typedef double value_type; - }; - - GLM_FUNC_QUALIFIER std::string format(const char* msg, ...) 
- { - std::size_t const STRING_BUFFER(4096); - char text[STRING_BUFFER]; - va_list list; - - if(msg == GLM_NULLPTR) - return std::string(); - - va_start(list, msg); -# if (GLM_COMPILER & GLM_COMPILER_VC) - vsprintf_s(text, STRING_BUFFER, msg, list); -# else// - std::vsprintf(text, msg, list); -# endif// - va_end(list); - - return std::string(text); - } - - static const char* LabelTrue = "true"; - static const char* LabelFalse = "false"; - - template - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%d";} - }; - - template - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%f";} - }; - -# if GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC - template<> - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} - }; - - template<> - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} - }; -# endif//GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC - - template - struct prefix{}; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "d";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "b";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u8";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i8";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u16";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i16";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u64";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i64";} - }; - - template - struct compute_to_string - {}; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<1, bool, Q> const& x) - { - return detail::format("bvec1(%s)", - x[0] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<2, bool, Q> const& x) - { - return detail::format("bvec2(%s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<3, bool, Q> const& x) - { - return detail::format("bvec3(%s, %s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse, - x[2] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<4, bool, Q> const& x) - { - return detail::format("bvec4(%s, %s, %s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse, - x[2] ? detail::LabelTrue : detail::LabelFalse, - x[3] ? 
detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<1, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec1(%s)", - PrefixStr, - LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec2(%s, %s)", - PrefixStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec3(%s, %s, %s)", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1]), - static_cast::value_type>(x[2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec4(%s, %s, %s, %s)", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1]), - static_cast::value_type>(x[2]), - static_cast::value_type>(x[3])); - } - }; - - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x2((%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x3((%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x4((%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, 
LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x2((%s, %s), (%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x2((%s, %s), (%s, %s), (%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), - static_cast::value_type>(x[3][0]), 
static_cast::value_type>(x[3][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), - static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3]), - static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2]), static_cast::value_type>(x[3][3])); - } - }; - - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(qua const& q) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%squat(%s, {%s, %s, %s})", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(q.w), - static_cast::value_type>(q.x), - static_cast::value_type>(q.y), - static_cast::value_type>(q.z)); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(tdualquat const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%sdualquat((%s, {%s, %s, %s}), (%s, {%s, %s, %s}))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x.real.w), - static_cast::value_type>(x.real.x), - static_cast::value_type>(x.real.y), - static_cast::value_type>(x.real.z), - static_cast::value_type>(x.dual.w), - static_cast::value_type>(x.dual.x), - static_cast::value_type>(x.dual.y), - static_cast::value_type>(x.dual.z)); - } - }; - -}//namespace detail - 
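For reference, a minimal sketch of how client code consumed this removed extension (not taken from the GLM sources; assumes GLM is on the include path):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/string_cast.hpp>
#include <iostream>

int main()
{
    glm::vec3 v(1.0f, 2.0f, 3.0f);
    // Prints "vec3(1.000000, 2.000000, 3.000000)".
    std::cout << glm::to_string(v) << std::endl;
    return 0;
}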
-template -GLM_FUNC_QUALIFIER std::string to_string(matType const& x) -{ - return detail::compute_to_string::call(x); -} - -}//namespace glm diff --git a/third_party/glm/gtx/texture.hpp b/third_party/glm/gtx/texture.hpp deleted file mode 100755 index 20585e6..0000000 --- a/third_party/glm/gtx/texture.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/// @ref gtx_texture -/// @file glm/gtx/texture.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_texture GLM_GTX_texture -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Wrapping mode of texture coordinates. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/integer.hpp" -#include "../gtx/component_wise.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_texture is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_texture extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_texture - /// @{ - - /// Compute the number of mipmaps levels necessary to create a mipmap complete texture - /// - /// @param Extent Extent of the texture base level mipmap - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - template - T levels(vec const& Extent); - - /// @} -}// namespace glm - -#include "texture.inl" - diff --git a/third_party/glm/gtx/texture.inl b/third_party/glm/gtx/texture.inl deleted file mode 100755 index 593c826..0000000 --- a/third_party/glm/gtx/texture.inl +++ /dev/null @@ -1,17 +0,0 @@ -/// @ref gtx_texture - -namespace glm -{ - template - inline T levels(vec const& Extent) - { - return glm::log2(compMax(Extent)) + static_cast(1); - } - - template - inline T levels(T Extent) - { - return vec<1, T, defaultp>(Extent).x; - } -}//namespace glm - diff --git a/third_party/glm/gtx/transform.hpp b/third_party/glm/gtx/transform.hpp deleted file mode 100755 index 0279fc8..0000000 --- a/third_party/glm/gtx/transform.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_transform -/// @file glm/gtx/transform.hpp -/// -/// @see core (dependence) -/// @see gtc_matrix_transform (dependence) -/// @see gtx_transform -/// @see gtx_transform2 -/// -/// @defgroup gtx_transform GLM_GTX_transform -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add transformation matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/matrix_transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_transform is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_transform extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_transform - /// @{ - - /// Transforms a matrix with a translation 4 * 4 matrix created from 3 scalars. - /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> translate( - vec<3, T, Q> const& v); - - /// Builds a rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. 
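/// (Usage sketch, not part of the original header: unlike the gtc_matrix_transform
/// overloads, these build from the identity, so a model matrix composes as
///     glm::mat4 model = glm::translate(pos) * glm::rotate(angle, axis) * glm::scale(size);
/// for hypothetical pos/angle/axis/size values.)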
- /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotate( - T angle, - vec<3, T, Q> const& v); - - /// Transforms a matrix with a scale 4 * 4 matrix created from a vector of 3 components. - /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> scale( - vec<3, T, Q> const& v); - - /// @} -}// namespace glm - -#include "transform.inl" diff --git a/third_party/glm/gtx/transform.inl b/third_party/glm/gtx/transform.inl deleted file mode 100755 index 48ee680..0000000 --- a/third_party/glm/gtx/transform.inl +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref gtx_transform - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(vec<3, T, Q> const& v) - { - return translate(mat<4, 4, T, Q>(static_cast(1)), v); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(T angle, vec<3, T, Q> const& v) - { - return rotate(mat<4, 4, T, Q>(static_cast(1)), angle, v); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(vec<3, T, Q> const& v) - { - return scale(mat<4, 4, T, Q>(static_cast(1)), v); - } - -}//namespace glm diff --git a/third_party/glm/gtx/transform2.hpp b/third_party/glm/gtx/transform2.hpp deleted file mode 100755 index 0d8ba9d..0000000 --- a/third_party/glm/gtx/transform2.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/// @ref gtx_transform2 -/// @file glm/gtx/transform2.hpp -/// -/// @see core (dependence) -/// @see gtx_transform (dependence) -/// -/// @defgroup gtx_transform2 GLM_GTX_transform2 -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add extra transformation matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_transform2 extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_transform2 - /// @{ - - //! Transforms a matrix with a shearing on X axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x); - - //! Transforms a matrix with a shearing on X axis - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z); - - //! Transforms a matrix with a shearing on Z axis. - //! From GLM_GTX_transform2 extension. 
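//! (Usage sketch, not part of the original header: the two scalars shear by the named
//! axis onto the other two, e.g.
//!     glm::mat4 sheared = glm::shearZ3D(glm::mat4(1.0f), 0.5f, 0.0f);
//! adds 0.5 * z to the x coordinate of every transformed point.)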
diff --git a/third_party/glm/gtx/transform2.hpp b/third_party/glm/gtx/transform2.hpp deleted file mode 100755 index 0d8ba9d..0000000 --- a/third_party/glm/gtx/transform2.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/// @ref gtx_transform2 -/// @file glm/gtx/transform2.hpp -/// -/// @see core (dependence) -/// @see gtx_transform (dependence) -/// -/// @defgroup gtx_transform2 GLM_GTX_transform2 -/// @ingroup gtx -/// -/// Include <glm/gtx/transform2.hpp> to use the features of this extension. -/// -/// Add extra transformation matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_transform2 extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_transform2 - /// @{ - - //! Transforms a matrix with a shearing on X axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x); - - //! Transforms a matrix with a shearing on X axis - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z); - - //! Transforms a matrix with a shearing on Z axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T x, T y); - - //template <typename T> GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(const mat<4, 4, T, Q> & m, shearPlane, planePoint, angle) - // Identity + tan(angle) * cross(Normal, OnPlaneVector) 0 - // - dot(PointOnPlane, normal) * OnPlaneVector 1 - - // Reflect functions seem to don't work - //template <typename T> mat<3, 3, T, Q> reflect2D(const mat<3, 3, T, Q> & m, const vec<3, T, Q>& normal){return reflect2DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) - //template <typename T> mat<4, 4, T, Q> reflect3D(const mat<4, 4, T, Q> & m, const vec<3, T, Q>& normal){return reflect3DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) - - //! Build planar projection matrix along normal axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<3, 3, T, Q> proj2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal); - - //! Build planar projection matrix along normal axis. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> proj3D(mat<4, 4, T, Q> const & m, vec<3, T, Q> const& normal); - - //! Build a scale bias matrix. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(T scale, T bias); - - //! Build a scale bias matrix. - //! From GLM_GTX_transform2 extension. - template<typename T, qualifier Q> - GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias); - - /// @} -}// namespace glm - -#include "transform2.inl" diff --git a/third_party/glm/gtx/transform2.inl b/third_party/glm/gtx/transform2.inl deleted file mode 100755 index 2b53198..0000000 --- a/third_party/glm/gtx/transform2.inl +++ /dev/null @@ -1,125 +0,0 @@ -/// @ref gtx_transform2 - -namespace glm -{ - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T s) - { - mat<3, 3, T, Q> r(1); - r[1][0] = s; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T s) - { - mat<3, 3, T, Q> r(1); - r[0][1] = s; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[0][1] = s; - r[0][2] = t; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[1][0] = s; - r[1][2] = t; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[2][0] = s; - r[2][1] = t; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> reflect2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal) - { - mat<3, 3, T, Q> r(static_cast<T>(1)); - r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x; - r[0][1] = -static_cast<T>(2) * normal.x * normal.y; - r[1][0] = -static_cast<T>(2) * normal.x * normal.y; - r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> reflect3D(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& normal) - { - mat<4, 4, T, Q> r(static_cast<T>(1)); - r[0][0] = static_cast<T>(1) - static_cast<T>(2) * normal.x * normal.x; - r[0][1] = -static_cast<T>(2) * normal.x * normal.y; - r[0][2] = -static_cast<T>(2) * normal.x * normal.z; - - r[1][0] = -static_cast<T>(2) * normal.x * normal.y; - r[1][1] = static_cast<T>(1) - static_cast<T>(2) * normal.y * normal.y; - r[1][2] = -static_cast<T>(2) * normal.y * normal.z; - - r[2][0] = -static_cast<T>(2) * normal.x * normal.z; - r[2][1] = -static_cast<T>(2) * normal.y * normal.z; - r[2][2] = static_cast<T>(1) - static_cast<T>(2) * normal.z * normal.z; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> proj2D( - const mat<3, 3, T, Q>& m, - const vec<3, T, Q>& normal) - { - mat<3, 3, T, Q> r(static_cast<T>(1)); - r[0][0] = static_cast<T>(1) - normal.x * normal.x; - r[0][1] = - normal.x * normal.y; - r[1][0] = - normal.x * normal.y; - r[1][1] = static_cast<T>(1) - normal.y * normal.y; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> proj3D( - const mat<4, 4, T, Q>& m, - const vec<3, T, Q>& normal) - { - mat<4, 4, T, Q> r(static_cast<T>(1)); - r[0][0] = static_cast<T>(1) - normal.x * normal.x; - r[0][1] = - normal.x * normal.y; - r[0][2] = - normal.x * normal.z; - r[1][0] = - normal.x * normal.y; - r[1][1] = static_cast<T>(1) - normal.y * normal.y; - r[1][2] = - normal.y * normal.z; - r[2][0] = - normal.x * normal.z; - r[2][1] = - normal.y * normal.z; - r[2][2] = static_cast<T>(1) - normal.z * normal.z; - return m * r; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(T scale, T bias) - { - mat<4, 4, T, Q> result; - result[3] = vec<4, T, Q>(vec<3, T, Q>(bias), static_cast<T>(1)); - result[0][0] = scale; - result[1][1] = scale; - result[2][2] = scale; - return result; - } - - template<typename T, qualifier Q> - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias) - { - return m * scaleBias<T, Q>(scale, bias); - } -}//namespace glm -
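The shear helpers removed above simply place the shear factor on an off-diagonal entry of an identity matrix and post-multiply. A quick worked example of shearX2D with illustrative values:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/transform2.hpp>

// shearX2D(identity, s) yields x' = x + s*y, y' = y.
glm::vec3 p = glm::shearX2D(glm::mat3(1.0f), 0.5f) * glm::vec3(2.0f, 4.0f, 1.0f);
// p is (4, 4, 1), since x' = 2 + 0.5 * 4.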
diff --git a/third_party/glm/gtx/type_aligned.hpp b/third_party/glm/gtx/type_aligned.hpp deleted file mode 100755 index 2ae522c..0000000 --- a/third_party/glm/gtx/type_aligned.hpp +++ /dev/null @@ -1,982 +0,0 @@ -/// @ref gtx_type_aligned -/// @file glm/gtx/type_aligned.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_type_aligned GLM_GTX_type_aligned -/// @ingroup gtx -/// -/// Include <glm/gtx/type_aligned.hpp> to use the features of this extension. -/// -/// Defines aligned types. - -#pragma once - -// Dependency: -#include "../gtc/type_precision.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_type_aligned is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_type_aligned extension included") -# endif -#endif - -namespace glm -{ - /////////////////////////// - // Signed int vector types - - /// @addtogroup gtx_type_aligned - /// @{ - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int8, aligned_lowp_int8, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int16, aligned_lowp_int16, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int32, aligned_lowp_int32, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int64, aligned_lowp_int64, 8); - - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int8_t, aligned_lowp_int8_t, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type.
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int16_t, aligned_lowp_int16_t, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int32_t, aligned_lowp_int32_t, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int64_t, aligned_lowp_int64_t, 8); - - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i8, aligned_lowp_i8, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i16, aligned_lowp_i16, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i32, aligned_lowp_i32, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i64, aligned_lowp_i64, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int8, aligned_mediump_int8, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int16, aligned_mediump_int16, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int32, aligned_mediump_int32, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int64, aligned_mediump_int64, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int8_t, aligned_mediump_int8_t, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int16_t, aligned_mediump_int16_t, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int32_t, aligned_mediump_int32_t, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int64_t, aligned_mediump_int64_t, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i8, aligned_mediump_i8, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i16, aligned_mediump_i16, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i32, aligned_mediump_i32, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i64, aligned_mediump_i64, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int8, aligned_highp_int8, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int16, aligned_highp_int16, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int32, aligned_highp_int32, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int64, aligned_highp_int64, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int8_t, aligned_highp_int8_t, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int16_t, aligned_highp_int16_t, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int32_t, aligned_highp_int32_t, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int64_t, aligned_highp_int64_t, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i8, aligned_highp_i8, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i16, aligned_highp_i16, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i32, aligned_highp_i32, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i64, aligned_highp_i64, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int8, aligned_int8, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int16, aligned_int16, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int32, aligned_int32, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int64, aligned_int64, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int8_t, aligned_int8_t, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int16_t, aligned_int16_t, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int32_t, aligned_int32_t, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int64_t, aligned_int64_t, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8, aligned_i8, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16, aligned_i16, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32, aligned_i32, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64, aligned_i64, 8); - - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec1, aligned_ivec1, 4); - - /// Default qualifier 32 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec2, aligned_ivec2, 8); - - /// Default qualifier 32 bit signed integer aligned vector of 3 components type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec3, aligned_ivec3, 16); - - /// Default qualifier 32 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec4, aligned_ivec4, 16); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec1, aligned_i8vec1, 1); - - /// Default qualifier 8 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec2, aligned_i8vec2, 2); - - /// Default qualifier 8 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec3, aligned_i8vec3, 4); - - /// Default qualifier 8 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec4, aligned_i8vec4, 4); - - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec1, aligned_i16vec1, 2); - - /// Default qualifier 16 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec2, aligned_i16vec2, 4); - - /// Default qualifier 16 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec3, aligned_i16vec3, 8); - - /// Default qualifier 16 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec4, aligned_i16vec4, 8); - - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec1, aligned_i32vec1, 4); - - /// Default qualifier 32 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec2, aligned_i32vec2, 8); - - /// Default qualifier 32 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec3, aligned_i32vec3, 16); - - /// Default qualifier 32 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec4, aligned_i32vec4, 16); - - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec1, aligned_i64vec1, 8); - - /// Default qualifier 64 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec2, aligned_i64vec2, 16); - - /// Default qualifier 64 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec3, aligned_i64vec3, 32); - - /// Default qualifier 64 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec4, aligned_i64vec4, 32); - - - ///////////////////////////// - // Unsigned int vector types - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint8, aligned_lowp_uint8, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint16, aligned_lowp_uint16, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint32, aligned_lowp_uint32, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint64, aligned_lowp_uint64, 8); - - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint8_t, aligned_lowp_uint8_t, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint16_t, aligned_lowp_uint16_t, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint32_t, aligned_lowp_uint32_t, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint64_t, aligned_lowp_uint64_t, 8); - - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u8, aligned_lowp_u8, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u16, aligned_lowp_u16, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u32, aligned_lowp_u32, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u64, aligned_lowp_u64, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint8, aligned_mediump_uint8, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint16, aligned_mediump_uint16, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint32, aligned_mediump_uint32, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint64, aligned_mediump_uint64, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint8_t, aligned_mediump_uint8_t, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint16_t, aligned_mediump_uint16_t, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint32_t, aligned_mediump_uint32_t, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint64_t, aligned_mediump_uint64_t, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u8, aligned_mediump_u8, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u16, aligned_mediump_u16, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u32, aligned_mediump_u32, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u64, aligned_mediump_u64, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint8, aligned_highp_uint8, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint16, aligned_highp_uint16, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint32, aligned_highp_uint32, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint64, aligned_highp_uint64, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint8_t, aligned_highp_uint8_t, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint16_t, aligned_highp_uint16_t, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint32_t, aligned_highp_uint32_t, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint64_t, aligned_highp_uint64_t, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u8, aligned_highp_u8, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u16, aligned_highp_u16, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u32, aligned_highp_u32, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u64, aligned_highp_u64, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint8, aligned_uint8, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint16, aligned_uint16, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint32, aligned_uint32, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint64, aligned_uint64, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint8_t, aligned_uint8_t, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint16_t, aligned_uint16_t, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint32_t, aligned_uint32_t, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint64_t, aligned_uint64_t, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8, aligned_u8, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16, aligned_u16, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32, aligned_u32, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64, aligned_u64, 8); - - - /// Default qualifier 32 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec1, aligned_uvec1, 4); - - /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec2, aligned_uvec2, 8); - - /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec3, aligned_uvec3, 16); - - /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec4, aligned_uvec4, 16); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec1, aligned_u8vec1, 1); - - /// Default qualifier 8 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec2, aligned_u8vec2, 2); - - /// Default qualifier 8 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec3, aligned_u8vec3, 4); - - /// Default qualifier 8 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec4, aligned_u8vec4, 4); - - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec1, aligned_u16vec1, 2); - - /// Default qualifier 16 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec2, aligned_u16vec2, 4); - - /// Default qualifier 16 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec3, aligned_u16vec3, 8); - - /// Default qualifier 16 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec4, aligned_u16vec4, 8); - - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec1, aligned_u32vec1, 4); - - /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec2, aligned_u32vec2, 8); - - /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec3, aligned_u32vec3, 16); - - /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec4, aligned_u32vec4, 16); - - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec1, aligned_u64vec1, 8); - - /// Default qualifier 64 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec2, aligned_u64vec2, 16); - - /// Default qualifier 64 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec3, aligned_u64vec3, 32); - - /// Default qualifier 64 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec4, aligned_u64vec4, 32); - - - ////////////////////// - // Float vector types - - /// 32 bit single-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32, aligned_float32, 4); - - /// 32 bit single-qualifier floating-point aligned scalar. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32_t, aligned_float32_t, 4); - - /// 32 bit single-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32, aligned_f32, 4); - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64, aligned_float64, 8); - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64_t, aligned_float64_t, 8); - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64, aligned_f64, 8); - -# endif//GLM_FORCE_SINGLE_ONLY - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec1, aligned_vec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec2, aligned_vec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec3, aligned_vec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec4, aligned_vec4, 16); - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec1, aligned_fvec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec2, aligned_fvec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec3, aligned_fvec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec4, aligned_fvec4, 16); - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec1, aligned_f32vec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec2, aligned_f32vec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec3, aligned_f32vec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec4, aligned_f32vec4, 16); - - - /// Double-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec1, aligned_dvec1, 8); - - /// Double-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec2, aligned_dvec2, 16); - - /// Double-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec3, aligned_dvec3, 32); - - /// Double-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec4, aligned_dvec4, 32); - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec1, aligned_f64vec1, 8); - - /// Double-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec2, aligned_f64vec2, 16); - - /// Double-qualifier floating-point aligned vector of 3 components. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec3, aligned_f64vec3, 32); - - /// Double-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec4, aligned_f64vec4, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - ////////////////////// - // Float matrix types - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1 mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat2, aligned_mat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat3, aligned_mat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat4, aligned_mat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat2x2, aligned_mat2x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat3x3, aligned_mat3x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat4x4, aligned_mat4x4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 fmat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f32 fmat1x1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2x2, 16); - - /// Single-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x3, aligned_fmat2x3, 16); - - /// Single-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x4, aligned_fmat2x4, 16); - - /// Single-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x2, aligned_fmat3x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3x3, 16); - - /// Single-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x4, aligned_fmat3x4, 16); - - /// Single-qualifier floating-point aligned 4x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x2, aligned_fmat4x2, 16); - - /// Single-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x3, aligned_fmat4x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4x4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 f32mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f32 f32mat1x1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2x2, 16); - - /// Single-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x3, aligned_f32mat2x3, 16); - - /// Single-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x4, aligned_f32mat2x4, 16); - - /// Single-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x2, aligned_f32mat3x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3x3, 16); - - /// Single-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x4, aligned_f32mat3x4, 16); - - /// Single-qualifier floating-point aligned 4x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x2, aligned_f32mat4x2, 16); - - /// Single-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x3, aligned_f32mat4x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4x4, 16); - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 f64mat1; - - /// Double-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2, 32); - - /// Double-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3, 32); - - /// Double-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4, 32); - - - /// Double-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f64 f64mat1x1; - - /// Double-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2x2, 32); - - /// Double-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x3, aligned_f64mat2x3, 32); - - /// Double-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x4, aligned_f64mat2x4, 32); - - /// Double-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x2, aligned_f64mat3x2, 32); - - /// Double-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3x3, 32); - - /// Double-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x4, aligned_f64mat3x4, 32); - - /// Double-qualifier floating-point aligned 4x2 matrix. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x2, aligned_f64mat4x2, 32); - - /// Double-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x3, aligned_f64mat4x3, 32); - - /// Double-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4x4, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - - ////////////////////////// - // Quaternion types - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(quat, aligned_quat, 16); - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(quat, aligned_fquat, 16); - - /// Double-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dquat, aligned_dquat, 32); - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32quat, aligned_f32quat, 16); - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64quat, aligned_f64quat, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - /// @} -}//namespace glm - -#include "type_aligned.inl" diff --git a/third_party/glm/gtx/type_aligned.inl b/third_party/glm/gtx/type_aligned.inl deleted file mode 100755 index 54c1b81..0000000 --- a/third_party/glm/gtx/type_aligned.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtc_type_aligned - -namespace glm -{ - -}
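Every GLM_ALIGNED_TYPEDEF above expands to a typedef carrying an alignment attribute and nothing more. A one-line sanity check of the guarantee the removed header gave (sketch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/type_aligned.hpp>

// aligned_vec4 is a vec4 whose storage is promised on a 16-byte boundary,
// matching what SSE loads and std140-style GPU buffers expect.
static_assert(alignof(glm::aligned_vec4) == 16, "aligned_vec4 must be 16-byte aligned");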
diff --git a/third_party/glm/gtx/type_trait.hpp b/third_party/glm/gtx/type_trait.hpp deleted file mode 100755 index 56685c8..0000000 --- a/third_party/glm/gtx/type_trait.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/// @ref gtx_type_trait -/// @file glm/gtx/type_trait.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_type_trait GLM_GTX_type_trait -/// @ingroup gtx -/// -/// Include <glm/gtx/type_trait.hpp> to use the features of this extension. -/// -/// Defines traits for each type. - -#pragma once - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_type_trait is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_type_trait extension included") -# endif -#endif - -// Dependency: -#include "../detail/qualifier.hpp" -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" - -namespace glm -{ - /// @addtogroup gtx_type_trait - /// @{ - - template<typename T> - struct type - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = false; - static length_t const components = 0; - static length_t const cols = 0; - static length_t const rows = 0; - }; - - template<length_t L, typename T, qualifier Q> - struct type<vec<L, T, Q> > - { - static bool const is_vec = true; - static bool const is_mat = false; - static bool const is_quat = false; - static length_t const components = L; - }; - - template<length_t C, length_t R, typename T, qualifier Q> - struct type<mat<C, R, T, Q> > - { - static bool const is_vec = false; - static bool const is_mat = true; - static bool const is_quat = false; - static length_t const components = C; - static length_t const cols = C; - static length_t const rows = R; - }; - - template<typename T, qualifier Q> - struct type<tquat<T, Q> > - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = true; - static length_t const components = 4; - }; - - template<typename T, qualifier Q> - struct type<tdualquat<T, Q> > - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = true; - static length_t const components = 8; - }; - - /// @} -}//namespace glm - -#include "type_trait.inl" diff --git a/third_party/glm/gtx/type_trait.inl b/third_party/glm/gtx/type_trait.inl deleted file mode 100755 index 045de95..0000000 --- a/third_party/glm/gtx/type_trait.inl +++ /dev/null @@ -1,61 +0,0 @@ -/// @ref gtx_type_trait - -namespace glm -{ - template<typename T> - bool const type<T>::is_vec; - template<typename T> - bool const type<T>::is_mat; - template<typename T> - bool const type<T>::is_quat; - template<typename T> - length_t const type<T>::components; - template<typename T> - length_t const type<T>::cols; - template<typename T> - length_t const type<T>::rows; - - // vec - template<length_t L, typename T, qualifier Q> - bool const type<vec<L, T, Q> >::is_vec; - template<length_t L, typename T, qualifier Q> - bool const type<vec<L, T, Q> >::is_mat; - template<length_t L, typename T, qualifier Q> - bool const type<vec<L, T, Q> >::is_quat; - template<length_t L, typename T, qualifier Q> - length_t const type<vec<L, T, Q> >::components; - - // mat - template<length_t C, length_t R, typename T, qualifier Q> - bool const type<mat<C, R, T, Q> >::is_vec; - template<length_t C, length_t R, typename T, qualifier Q> - bool const type<mat<C, R, T, Q> >::is_mat; - template<length_t C, length_t R, typename T, qualifier Q> - bool const type<mat<C, R, T, Q> >::is_quat; - template<length_t C, length_t R, typename T, qualifier Q> - length_t const type<mat<C, R, T, Q> >::components; - template<length_t C, length_t R, typename T, qualifier Q> - length_t const type<mat<C, R, T, Q> >::cols; - template<length_t C, length_t R, typename T, qualifier Q> - length_t const type<mat<C, R, T, Q> >::rows; - - // tquat - template<typename T, qualifier Q> - bool const type<tquat<T, Q> >::is_vec; - template<typename T, qualifier Q> - bool const type<tquat<T, Q> >::is_mat; - template<typename T, qualifier Q> - bool const type<tquat<T, Q> >::is_quat; - template<typename T, qualifier Q> - length_t const type<tquat<T, Q> >::components; - - // tdualquat - template<typename T, qualifier Q> - bool const type<tdualquat<T, Q> >::is_vec; - template<typename T, qualifier Q> - bool const type<tdualquat<T, Q> >::is_mat; - template<typename T, qualifier Q> - bool const type<tdualquat<T, Q> >::is_quat; - template<typename T, qualifier Q> - length_t const type<tdualquat<T, Q> >::components; -}//namespace glm
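The traits removed above are plain compile-time constants, so they can be used directly in static_assert (sketch):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/type_trait.hpp>

// type<T> reports what kind of GLM type T is, entirely at compile time.
static_assert(glm::type<glm::vec3>::is_vec && glm::type<glm::vec3>::components == 3, "vec3 trait");
static_assert(glm::type<glm::mat4>::is_mat && glm::type<glm::mat4>::cols == 4, "mat4 trait");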
diff --git a/third_party/glm/gtx/vec_swizzle.hpp b/third_party/glm/gtx/vec_swizzle.hpp deleted file mode 100755 index 1c49abc..0000000 --- a/third_party/glm/gtx/vec_swizzle.hpp +++ /dev/null @@ -1,2782 +0,0 @@ -/// @ref gtx_vec_swizzle -/// @file glm/gtx/vec_swizzle.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_vec_swizzle GLM_GTX_vec_swizzle -/// @ingroup gtx -/// -/// Include <glm/gtx/vec_swizzle.hpp> to use the features of this extension. -/// -/// Functions to perform swizzle operation. - -#pragma once - -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_vec_swizzle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_vec_swizzle extension included") -# endif -#endif - -namespace glm { - // xx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<1, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - // xy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - // xz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.z); - } - - // xw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> xw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.w); - } - - // yx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - // yy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - // yz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.z); - } - - // yw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> yw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.w); - } - - // zx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.x); - } - - // zy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.y); - } - - // zz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.z); - } - - // zw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> zw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.w); - } - - // wx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> wx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.x); - } - - // wy - template<typename T, qualifier Q> - GLM_INLINE
glm::vec<2, T, Q> wy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.y); - } - - // wz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> wz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.z); - } - - // ww - template<typename T, qualifier Q> - GLM_INLINE glm::vec<2, T, Q> ww(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.w); - } - - // xxx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<1, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - // xxy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - // xxz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.z); - } - - // xxw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.w); - } - - // xyx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - // xyy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - // xyz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.z); - } - - // xyw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.w); - } - - // xzx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.x); - } - - // xzy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.y); - } - - // xzz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T,
Q>(v.x, v.z, v.z); - } - - // xzw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.w); - } - - // xwx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.x); - } - - // xwy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.y); - } - - // xwz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.z); - } - - // xww - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> xww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.w); - } - - // yxx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - // yxy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - // yxz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.z); - } - - // yxw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.w); - } - - // yyx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - // yyy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - // yyz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.z); - } - - // yyw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.w); - } - - // yzx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.x); - } - - // yzy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.y); - } - - // yzz - template<typename T, qualifier Q>
- GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.z); - } - - // yzw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.w); - } - - // ywx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> ywx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.x); - } - - // ywy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> ywy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.y); - } - - // ywz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> ywz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.z); - } - - // yww - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> yww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.w); - } - - // zxx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.x); - } - - // zxy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.y); - } - - // zxz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.z); - } - - // zxw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.w); - } - - // zyx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.x); - } - - // zyy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.y); - } - - // zyz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.z); - } - - // zyw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.w); - } - - // zzx - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.x); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.x); - } - - // zzy - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.y); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.y); - } - - // zzz - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.z); - } - - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.z); - } - - // zzw - template<typename T, qualifier Q> - GLM_INLINE glm::vec<3,
T, Q> zzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.w); - } - - // zwx - template - GLM_INLINE glm::vec<3, T, Q> zwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.x); - } - - // zwy - template - GLM_INLINE glm::vec<3, T, Q> zwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.y); - } - - // zwz - template - GLM_INLINE glm::vec<3, T, Q> zwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.z); - } - - // zww - template - GLM_INLINE glm::vec<3, T, Q> zww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.w); - } - - // wxx - template - GLM_INLINE glm::vec<3, T, Q> wxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.x); - } - - // wxy - template - GLM_INLINE glm::vec<3, T, Q> wxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.y); - } - - // wxz - template - GLM_INLINE glm::vec<3, T, Q> wxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.z); - } - - // wxw - template - GLM_INLINE glm::vec<3, T, Q> wxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.w); - } - - // wyx - template - GLM_INLINE glm::vec<3, T, Q> wyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.x); - } - - // wyy - template - GLM_INLINE glm::vec<3, T, Q> wyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.y); - } - - // wyz - template - GLM_INLINE glm::vec<3, T, Q> wyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.z); - } - - // wyw - template - GLM_INLINE glm::vec<3, T, Q> wyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.w); - } - - // wzx - template - GLM_INLINE glm::vec<3, T, Q> wzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.x); - } - - // wzy - template - GLM_INLINE glm::vec<3, T, Q> wzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.y); - } - - // wzz - template - GLM_INLINE glm::vec<3, T, Q> wzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.z); - } - - // wzw - template - GLM_INLINE glm::vec<3, T, Q> wzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.w); - } - - // wwx - template - GLM_INLINE glm::vec<3, T, Q> wwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.x); - } - - // wwy - template - GLM_INLINE glm::vec<3, T, Q> wwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.y); - } - - // wwz - template - GLM_INLINE glm::vec<3, T, Q> wwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.z); - } - - // www - template - GLM_INLINE glm::vec<3, T, Q> www(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.w); - } - - // xxxx - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<1, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - // xxxy - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, 
v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); - } - - // xxxz - template - GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); - } - - // xxxw - template - GLM_INLINE glm::vec<4, T, Q> xxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.w); - } - - // xxyx - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - // xxyy - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - // xxyz - template - GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); - } - - // xxyw - template - GLM_INLINE glm::vec<4, T, Q> xxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.w); - } - - // xxzx - template - GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); - } - - // xxzy - template - GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); - } - - // xxzz - template - GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); - } - - // xxzw - template - GLM_INLINE glm::vec<4, T, Q> xxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.w); - } - - // xxwx - template - GLM_INLINE glm::vec<4, T, Q> xxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.x); - } - - // xxwy - template - GLM_INLINE glm::vec<4, T, Q> xxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.y); - } - - // xxwz - template - GLM_INLINE glm::vec<4, T, Q> xxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.z); - } - - // xxww - template - GLM_INLINE glm::vec<4, T, Q> xxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.w); - } - - // xyxx - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, 
Q>(v.x, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); - } - - // xyxy - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - // xyxz - template - GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); - } - - // xyxw - template - GLM_INLINE glm::vec<4, T, Q> xyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.w); - } - - // xyyx - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - // xyyy - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - // xyyz - template - GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); - } - - // xyyw - template - GLM_INLINE glm::vec<4, T, Q> xyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.w); - } - - // xyzx - template - GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); - } - - // xyzy - template - GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); - } - - // xyzz - template - GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); - } - - // xyzw - template - GLM_INLINE glm::vec<4, T, Q> xyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.w); - } - - // xywx - template - GLM_INLINE glm::vec<4, T, Q> xywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.x); - } - - // xywy - template - GLM_INLINE glm::vec<4, T, Q> xywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.y); - } - - // xywz - template - GLM_INLINE glm::vec<4, T, Q> xywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, 
v.y, v.w, v.z); - } - - // xyww - template - GLM_INLINE glm::vec<4, T, Q> xyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.w); - } - - // xzxx - template - GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); - } - - // xzxy - template - GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); - } - - // xzxz - template - GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); - } - - // xzxw - template - GLM_INLINE glm::vec<4, T, Q> xzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.w); - } - - // xzyx - template - GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); - } - - // xzyy - template - GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); - } - - // xzyz - template - GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); - } - - // xzyw - template - GLM_INLINE glm::vec<4, T, Q> xzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.w); - } - - // xzzx - template - GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); - } - - // xzzy - template - GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); - } - - // xzzz - template - GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); - } - - // xzzw - template - GLM_INLINE glm::vec<4, T, Q> xzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.w); - } - - // xzwx - template - GLM_INLINE glm::vec<4, T, Q> xzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.x); - } - - // xzwy - template - GLM_INLINE glm::vec<4, T, Q> xzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.y); - } - - // xzwz - template - GLM_INLINE glm::vec<4, T, Q> xzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.z); - } - - // xzww - template - GLM_INLINE glm::vec<4, T, Q> xzww(const glm::vec<4, T, Q> &v) { - return 
glm::vec<4, T, Q>(v.x, v.z, v.w, v.w); - } - - // xwxx - template - GLM_INLINE glm::vec<4, T, Q> xwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.x); - } - - // xwxy - template - GLM_INLINE glm::vec<4, T, Q> xwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.y); - } - - // xwxz - template - GLM_INLINE glm::vec<4, T, Q> xwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.z); - } - - // xwxw - template - GLM_INLINE glm::vec<4, T, Q> xwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.w); - } - - // xwyx - template - GLM_INLINE glm::vec<4, T, Q> xwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.x); - } - - // xwyy - template - GLM_INLINE glm::vec<4, T, Q> xwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.y); - } - - // xwyz - template - GLM_INLINE glm::vec<4, T, Q> xwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.z); - } - - // xwyw - template - GLM_INLINE glm::vec<4, T, Q> xwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.w); - } - - // xwzx - template - GLM_INLINE glm::vec<4, T, Q> xwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.x); - } - - // xwzy - template - GLM_INLINE glm::vec<4, T, Q> xwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.y); - } - - // xwzz - template - GLM_INLINE glm::vec<4, T, Q> xwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.z); - } - - // xwzw - template - GLM_INLINE glm::vec<4, T, Q> xwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.w); - } - - // xwwx - template - GLM_INLINE glm::vec<4, T, Q> xwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.x); - } - - // xwwy - template - GLM_INLINE glm::vec<4, T, Q> xwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.y); - } - - // xwwz - template - GLM_INLINE glm::vec<4, T, Q> xwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.z); - } - - // xwww - template - GLM_INLINE glm::vec<4, T, Q> xwww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.w); - } - - // yxxx - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - // yxxy - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - // yxxz - template - GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); - } - - // yxxw - template - GLM_INLINE glm::vec<4, T, Q> yxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.w); - } - - // yxyx - template - GLM_INLINE 
glm::vec<4, T, Q> yxyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - // yxyy - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - // yxyz - template - GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); - } - - // yxyw - template - GLM_INLINE glm::vec<4, T, Q> yxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.w); - } - - // yxzx - template - GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); - } - - // yxzy - template - GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); - } - - // yxzz - template - GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); - } - - // yxzw - template - GLM_INLINE glm::vec<4, T, Q> yxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.w); - } - - // yxwx - template - GLM_INLINE glm::vec<4, T, Q> yxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.x); - } - - // yxwy - template - GLM_INLINE glm::vec<4, T, Q> yxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.y); - } - - // yxwz - template - GLM_INLINE glm::vec<4, T, Q> yxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.z); - } - - // yxww - template - GLM_INLINE glm::vec<4, T, Q> yxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.w); - } - - // yyxx - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - // yyxy - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - // yyxz - template - GLM_INLINE 
glm::vec<4, T, Q> yyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); - } - - // yyxw - template - GLM_INLINE glm::vec<4, T, Q> yyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.w); - } - - // yyyx - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - // yyyy - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - // yyyz - template - GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); - } - - // yyyw - template - GLM_INLINE glm::vec<4, T, Q> yyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.w); - } - - // yyzx - template - GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); - } - - // yyzy - template - GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); - } - - // yyzz - template - GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); - } - - // yyzw - template - GLM_INLINE glm::vec<4, T, Q> yyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.w); - } - - // yywx - template - GLM_INLINE glm::vec<4, T, Q> yywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.x); - } - - // yywy - template - GLM_INLINE glm::vec<4, T, Q> yywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.y); - } - - // yywz - template - GLM_INLINE glm::vec<4, T, Q> yywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.z); - } - - // yyww - template - GLM_INLINE glm::vec<4, T, Q> yyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.w); - } - - // yzxx - template - GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); - } - - // yzxy - template - GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); - } - - template - 
GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); - } - - // yzxz - template - GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); - } - - // yzxw - template - GLM_INLINE glm::vec<4, T, Q> yzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.w); - } - - // yzyx - template - GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); - } - - // yzyy - template - GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); - } - - // yzyz - template - GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); - } - - // yzyw - template - GLM_INLINE glm::vec<4, T, Q> yzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.w); - } - - // yzzx - template - GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); - } - - // yzzy - template - GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); - } - - // yzzz - template - GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); - } - - // yzzw - template - GLM_INLINE glm::vec<4, T, Q> yzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.w); - } - - // yzwx - template - GLM_INLINE glm::vec<4, T, Q> yzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.x); - } - - // yzwy - template - GLM_INLINE glm::vec<4, T, Q> yzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.y); - } - - // yzwz - template - GLM_INLINE glm::vec<4, T, Q> yzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.z); - } - - // yzww - template - GLM_INLINE glm::vec<4, T, Q> yzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.w); - } - - // ywxx - template - GLM_INLINE glm::vec<4, T, Q> ywxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.x); - } - - // ywxy - template - GLM_INLINE glm::vec<4, T, Q> ywxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.y); - } - - // ywxz - template - GLM_INLINE glm::vec<4, T, Q> ywxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.z); - } - - // ywxw - template - GLM_INLINE glm::vec<4, T, Q> ywxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, 
v.w, v.x, v.w); - } - - // ywyx - template - GLM_INLINE glm::vec<4, T, Q> ywyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.x); - } - - // ywyy - template - GLM_INLINE glm::vec<4, T, Q> ywyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.y); - } - - // ywyz - template - GLM_INLINE glm::vec<4, T, Q> ywyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.z); - } - - // ywyw - template - GLM_INLINE glm::vec<4, T, Q> ywyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.w); - } - - // ywzx - template - GLM_INLINE glm::vec<4, T, Q> ywzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.x); - } - - // ywzy - template - GLM_INLINE glm::vec<4, T, Q> ywzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.y); - } - - // ywzz - template - GLM_INLINE glm::vec<4, T, Q> ywzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.z); - } - - // ywzw - template - GLM_INLINE glm::vec<4, T, Q> ywzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.w); - } - - // ywwx - template - GLM_INLINE glm::vec<4, T, Q> ywwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.x); - } - - // ywwy - template - GLM_INLINE glm::vec<4, T, Q> ywwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.y); - } - - // ywwz - template - GLM_INLINE glm::vec<4, T, Q> ywwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.z); - } - - // ywww - template - GLM_INLINE glm::vec<4, T, Q> ywww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.w); - } - - // zxxx - template - GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); - } - - // zxxy - template - GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); - } - - // zxxz - template - GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); - } - - // zxxw - template - GLM_INLINE glm::vec<4, T, Q> zxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.w); - } - - // zxyx - template - GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); - } - - // zxyy - template - GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); - } - - // zxyz - template - GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); - } - - // zxyw - template - GLM_INLINE glm::vec<4, T, Q> zxyw(const 
glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.w); - } - - // zxzx - template - GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); - } - - // zxzy - template - GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); - } - - // zxzz - template - GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); - } - - // zxzw - template - GLM_INLINE glm::vec<4, T, Q> zxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.w); - } - - // zxwx - template - GLM_INLINE glm::vec<4, T, Q> zxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.x); - } - - // zxwy - template - GLM_INLINE glm::vec<4, T, Q> zxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.y); - } - - // zxwz - template - GLM_INLINE glm::vec<4, T, Q> zxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.z); - } - - // zxww - template - GLM_INLINE glm::vec<4, T, Q> zxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.w); - } - - // zyxx - template - GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); - } - - // zyxy - template - GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); - } - - // zyxz - template - GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); - } - - // zyxw - template - GLM_INLINE glm::vec<4, T, Q> zyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.w); - } - - // zyyx - template - GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); - } - - // zyyy - template - GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); - } - - // zyyz - template - GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); - } - - // zyyw - template - GLM_INLINE glm::vec<4, T, Q> zyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.w); - } - - // zyzx - template - GLM_INLINE 
glm::vec<4, T, Q> zyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); - } - - // zyzy - template - GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); - } - - // zyzz - template - GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); - } - - // zyzw - template - GLM_INLINE glm::vec<4, T, Q> zyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.w); - } - - // zywx - template - GLM_INLINE glm::vec<4, T, Q> zywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.x); - } - - // zywy - template - GLM_INLINE glm::vec<4, T, Q> zywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.y); - } - - // zywz - template - GLM_INLINE glm::vec<4, T, Q> zywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.z); - } - - // zyww - template - GLM_INLINE glm::vec<4, T, Q> zyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.w); - } - - // zzxx - template - GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); - } - - // zzxy - template - GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); - } - - // zzxz - template - GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); - } - - // zzxw - template - GLM_INLINE glm::vec<4, T, Q> zzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.w); - } - - // zzyx - template - GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); - } - - // zzyy - template - GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); - } - - // zzyz - template - GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); - } - - // zzyw - template - GLM_INLINE glm::vec<4, T, Q> zzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.w); - } - - // zzzx - template - GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); - } - - template - 
GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); - } - - // zzzy - template - GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); - } - - // zzzz - template - GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); - } - - // zzzw - template - GLM_INLINE glm::vec<4, T, Q> zzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.w); - } - - // zzwx - template - GLM_INLINE glm::vec<4, T, Q> zzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.x); - } - - // zzwy - template - GLM_INLINE glm::vec<4, T, Q> zzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.y); - } - - // zzwz - template - GLM_INLINE glm::vec<4, T, Q> zzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.z); - } - - // zzww - template - GLM_INLINE glm::vec<4, T, Q> zzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.w); - } - - // zwxx - template - GLM_INLINE glm::vec<4, T, Q> zwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.x); - } - - // zwxy - template - GLM_INLINE glm::vec<4, T, Q> zwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.y); - } - - // zwxz - template - GLM_INLINE glm::vec<4, T, Q> zwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.z); - } - - // zwxw - template - GLM_INLINE glm::vec<4, T, Q> zwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.w); - } - - // zwyx - template - GLM_INLINE glm::vec<4, T, Q> zwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.x); - } - - // zwyy - template - GLM_INLINE glm::vec<4, T, Q> zwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.y); - } - - // zwyz - template - GLM_INLINE glm::vec<4, T, Q> zwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.z); - } - - // zwyw - template - GLM_INLINE glm::vec<4, T, Q> zwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.w); - } - - // zwzx - template - GLM_INLINE glm::vec<4, T, Q> zwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.x); - } - - // zwzy - template - GLM_INLINE glm::vec<4, T, Q> zwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.y); - } - - // zwzz - template - GLM_INLINE glm::vec<4, T, Q> zwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.z); - } - - // zwzw - template - GLM_INLINE glm::vec<4, T, Q> zwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.w); - } - - // zwwx - template - GLM_INLINE glm::vec<4, T, Q> zwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.x); - } - - // zwwy - template - GLM_INLINE glm::vec<4, T, Q> zwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.y); - } - - // zwwz - template - GLM_INLINE glm::vec<4, T, Q> zwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.z); - } - - // zwww - template - GLM_INLINE glm::vec<4, T, Q> zwww(const 
glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.w); - } - - // wxxx - template - GLM_INLINE glm::vec<4, T, Q> wxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.x); - } - - // wxxy - template - GLM_INLINE glm::vec<4, T, Q> wxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.y); - } - - // wxxz - template - GLM_INLINE glm::vec<4, T, Q> wxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.z); - } - - // wxxw - template - GLM_INLINE glm::vec<4, T, Q> wxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.w); - } - - // wxyx - template - GLM_INLINE glm::vec<4, T, Q> wxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.x); - } - - // wxyy - template - GLM_INLINE glm::vec<4, T, Q> wxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.y); - } - - // wxyz - template - GLM_INLINE glm::vec<4, T, Q> wxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.z); - } - - // wxyw - template - GLM_INLINE glm::vec<4, T, Q> wxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.w); - } - - // wxzx - template - GLM_INLINE glm::vec<4, T, Q> wxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.x); - } - - // wxzy - template - GLM_INLINE glm::vec<4, T, Q> wxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.y); - } - - // wxzz - template - GLM_INLINE glm::vec<4, T, Q> wxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.z); - } - - // wxzw - template - GLM_INLINE glm::vec<4, T, Q> wxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.w); - } - - // wxwx - template - GLM_INLINE glm::vec<4, T, Q> wxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.x); - } - - // wxwy - template - GLM_INLINE glm::vec<4, T, Q> wxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.y); - } - - // wxwz - template - GLM_INLINE glm::vec<4, T, Q> wxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.z); - } - - // wxww - template - GLM_INLINE glm::vec<4, T, Q> wxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.w); - } - - // wyxx - template - GLM_INLINE glm::vec<4, T, Q> wyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.x); - } - - // wyxy - template - GLM_INLINE glm::vec<4, T, Q> wyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.y); - } - - // wyxz - template - GLM_INLINE glm::vec<4, T, Q> wyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.z); - } - - // wyxw - template - GLM_INLINE glm::vec<4, T, Q> wyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.w); - } - - // wyyx - template - GLM_INLINE glm::vec<4, T, Q> wyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.x); - } - - // wyyy - template - GLM_INLINE glm::vec<4, T, Q> wyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.y); - } - - // wyyz - template - GLM_INLINE glm::vec<4, T, Q> wyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.z); - } - - // wyyw - template - GLM_INLINE glm::vec<4, T, Q> wyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.w); - } - - // wyzx - template - GLM_INLINE glm::vec<4, T, Q> wyzx(const glm::vec<4, T, Q> &v) { - 
return glm::vec<4, T, Q>(v.w, v.y, v.z, v.x); - } - - // wyzy - template - GLM_INLINE glm::vec<4, T, Q> wyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.y); - } - - // wyzz - template - GLM_INLINE glm::vec<4, T, Q> wyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.z); - } - - // wyzw - template - GLM_INLINE glm::vec<4, T, Q> wyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.w); - } - - // wywx - template - GLM_INLINE glm::vec<4, T, Q> wywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.x); - } - - // wywy - template - GLM_INLINE glm::vec<4, T, Q> wywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.y); - } - - // wywz - template - GLM_INLINE glm::vec<4, T, Q> wywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.z); - } - - // wyww - template - GLM_INLINE glm::vec<4, T, Q> wyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.w); - } - - // wzxx - template - GLM_INLINE glm::vec<4, T, Q> wzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.x); - } - - // wzxy - template - GLM_INLINE glm::vec<4, T, Q> wzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.y); - } - - // wzxz - template - GLM_INLINE glm::vec<4, T, Q> wzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.z); - } - - // wzxw - template - GLM_INLINE glm::vec<4, T, Q> wzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.w); - } - - // wzyx - template - GLM_INLINE glm::vec<4, T, Q> wzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.x); - } - - // wzyy - template - GLM_INLINE glm::vec<4, T, Q> wzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.y); - } - - // wzyz - template - GLM_INLINE glm::vec<4, T, Q> wzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.z); - } - - // wzyw - template - GLM_INLINE glm::vec<4, T, Q> wzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.w); - } - - // wzzx - template - GLM_INLINE glm::vec<4, T, Q> wzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.x); - } - - // wzzy - template - GLM_INLINE glm::vec<4, T, Q> wzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.y); - } - - // wzzz - template - GLM_INLINE glm::vec<4, T, Q> wzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.z); - } - - // wzzw - template - GLM_INLINE glm::vec<4, T, Q> wzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.w); - } - - // wzwx - template - GLM_INLINE glm::vec<4, T, Q> wzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.x); - } - - // wzwy - template - GLM_INLINE glm::vec<4, T, Q> wzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.y); - } - - // wzwz - template - GLM_INLINE glm::vec<4, T, Q> wzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.z); - } - - // wzww - template - GLM_INLINE glm::vec<4, T, Q> wzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.w); - } - - // wwxx - template - GLM_INLINE glm::vec<4, T, Q> wwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.x); - } - - // wwxy - template - GLM_INLINE glm::vec<4, T, Q> wwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, 
v.w, v.x, v.y); - } - - // wwxz - template - GLM_INLINE glm::vec<4, T, Q> wwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.z); - } - - // wwxw - template - GLM_INLINE glm::vec<4, T, Q> wwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.w); - } - - // wwyx - template - GLM_INLINE glm::vec<4, T, Q> wwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.x); - } - - // wwyy - template - GLM_INLINE glm::vec<4, T, Q> wwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.y); - } - - // wwyz - template - GLM_INLINE glm::vec<4, T, Q> wwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.z); - } - - // wwyw - template - GLM_INLINE glm::vec<4, T, Q> wwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.w); - } - - // wwzx - template - GLM_INLINE glm::vec<4, T, Q> wwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.x); - } - - // wwzy - template - GLM_INLINE glm::vec<4, T, Q> wwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.y); - } - - // wwzz - template - GLM_INLINE glm::vec<4, T, Q> wwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.z); - } - - // wwzw - template - GLM_INLINE glm::vec<4, T, Q> wwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.w); - } - - // wwwx - template - GLM_INLINE glm::vec<4, T, Q> wwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.x); - } - - // wwwy - template - GLM_INLINE glm::vec<4, T, Q> wwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.y); - } - - // wwwz - template - GLM_INLINE glm::vec<4, T, Q> wwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.z); - } - - // wwww - template - GLM_INLINE glm::vec<4, T, Q> wwww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.w); - } - -} diff --git a/third_party/glm/gtx/vector_angle.hpp b/third_party/glm/gtx/vector_angle.hpp deleted file mode 100755 index 9ae4371..0000000 --- a/third_party/glm/gtx/vector_angle.hpp +++ /dev/null @@ -1,57 +0,0 @@ -/// @ref gtx_vector_angle -/// @file glm/gtx/vector_angle.hpp -/// -/// @see core (dependence) -/// @see gtx_quaternion (dependence) -/// @see gtx_epsilon (dependence) -/// -/// @defgroup gtx_vector_angle GLM_GTX_vector_angle -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Compute angle between vectors - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/epsilon.hpp" -#include "../gtx/quaternion.hpp" -#include "../gtx/rotate_vector.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_vector_angle extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_vector_angle - /// @{ - - //! Returns the absolute angle between two vectors. - //! Parameters need to be normalized. - /// @see gtx_vector_angle extension. - template - GLM_FUNC_DECL T angle(vec const& x, vec const& y); - - //! Returns the oriented angle between two 2d vectors. - //! Parameters need to be normalized. - /// @see gtx_vector_angle extension. 
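For reference, the helpers deleted above were free-function swizzles over GLM vectors; after their removal, a call site can spell the permutation out with a plain constructor instead. A minimal sketch, assuming GLM itself remains available to client code:

    #include <glm/glm.hpp>

    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
    glm::vec3 a(v.y, v.z, v.x);      // what the deleted yzx(v) returned
    glm::vec4 b(v.x, v.x, v.w, v.w); // what the deleted xxww(v) returned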
diff --git a/third_party/glm/gtx/vector_angle.hpp b/third_party/glm/gtx/vector_angle.hpp
deleted file mode 100755
index 9ae4371..0000000
--- a/third_party/glm/gtx/vector_angle.hpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/// @ref gtx_vector_angle
-/// @file glm/gtx/vector_angle.hpp
-///
-/// @see core (dependence)
-/// @see gtx_quaternion (dependence)
-/// @see gtx_epsilon (dependence)
-///
-/// @defgroup gtx_vector_angle GLM_GTX_vector_angle
-/// @ingroup gtx
-///
-/// Include <glm/gtx/vector_angle.hpp> to use the features of this extension.
-///
-/// Compute angle between vectors
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-#include "../gtc/epsilon.hpp"
-#include "../gtx/quaternion.hpp"
-#include "../gtx/rotate_vector.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_vector_angle extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_vector_angle
-	/// @{
-
-	//! Returns the absolute angle between two vectors.
-	//! Parameters need to be normalized.
-	/// @see gtx_vector_angle extension.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
-
-	//! Returns the oriented angle between two 2d vectors.
-	//! Parameters need to be normalized.
-	/// @see gtx_vector_angle extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y);
-
-	//! Returns the oriented angle between two 3d vectors based from a reference axis.
-	//! Parameters need to be normalized.
-	/// @see gtx_vector_angle extension.
-	template<typename T, qualifier Q>
-	GLM_FUNC_DECL T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref);
-
-	/// @}
-}// namespace glm
-
-#include "vector_angle.inl"
diff --git a/third_party/glm/gtx/vector_angle.inl b/third_party/glm/gtx/vector_angle.inl
deleted file mode 100755
index a1f957a..0000000
--- a/third_party/glm/gtx/vector_angle.inl
+++ /dev/null
@@ -1,44 +0,0 @@
-/// @ref gtx_vector_angle
-
-namespace glm
-{
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType angle
-	(
-		genType const& x,
-		genType const& y
-	)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<genType>::is_iec559, "'angle' only accept floating-point inputs");
-		return acos(clamp(dot(x, y), genType(-1), genType(1)));
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER T angle(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'angle' only accept floating-point inputs");
-		return acos(clamp(dot(x, y), T(-1), T(1)));
-	}
-
-	//! \todo epsilon is hard coded to 0.01
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'orientedAngle' only accept floating-point inputs");
-		T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
-
-		if(all(epsilonEqual(y, glm::rotate(x, Angle), T(0.0001))))
-			return Angle;
-		else
-			return -Angle;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'orientedAngle' only accept floating-point inputs");
-
-		T const Angle(acos(clamp(dot(x, y), T(-1), T(1))));
-		return mix(Angle, -Angle, dot(ref, cross(x, y)) < T(0));
-	}
-}//namespace glm
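For reference, a minimal sketch of how the deleted vector_angle helpers were used (inputs must be pre-normalized, as the doc comments above note; results are in radians):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/vector_angle.hpp>

    glm::vec2 a = glm::normalize(glm::vec2(1.0f, 0.0f));
    glm::vec2 b = glm::normalize(glm::vec2(0.0f, 1.0f));
    float unsignedAngle = glm::angle(a, b);         // acos(dot(a, b)), ~pi/2 here
    float signedAngle   = glm::orientedAngle(a, b); // +pi/2: b is counter-clockwise from a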
diff --git a/third_party/glm/gtx/vector_query.hpp b/third_party/glm/gtx/vector_query.hpp
deleted file mode 100755
index 77c7b97..0000000
--- a/third_party/glm/gtx/vector_query.hpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/// @ref gtx_vector_query
-/// @file glm/gtx/vector_query.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtx_vector_query GLM_GTX_vector_query
-/// @ingroup gtx
-///
-/// Include <glm/gtx/vector_query.hpp> to use the features of this extension.
-///
-/// Query informations of vector types
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-#include <cfloat>
-#include <limits>
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_vector_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_vector_query extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_vector_query
-	/// @{
-
-	//! Check whether two vectors are collinears.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
-
-	//! Check whether two vectors are orthogonals.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
-
-	//! Check whether a vector is normalized.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNormalized(vec<L, T, Q> const& v, T const& epsilon);
-
-	//! Check whether a vector is null.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL bool isNull(vec<L, T, Q> const& v, T const& epsilon);
-
-	//! Check whether a each component of a vector is null.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon);
-
-	//! Check whether two vectors are orthonormal.
-	/// @see gtx_vector_query extensions.
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon);
-
-	/// @}
-}// namespace glm
-
-#include "vector_query.inl"
diff --git a/third_party/glm/gtx/vector_query.inl b/third_party/glm/gtx/vector_query.inl
deleted file mode 100755
index d1a5c9b..0000000
--- a/third_party/glm/gtx/vector_query.inl
+++ /dev/null
@@ -1,154 +0,0 @@
-/// @ref gtx_vector_query
-
-#include <cassert>
-
-namespace glm{
-namespace detail
-{
-	template<length_t L, typename T, qualifier Q>
-	struct compute_areCollinear{};
-
-	template<typename T, qualifier Q>
-	struct compute_areCollinear<2, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static bool call(vec<2, T, Q> const& v0, vec<2, T, Q> const& v1, T const& epsilon)
-		{
-			return length(cross(vec<3, T, Q>(v0, static_cast<T>(0)), vec<3, T, Q>(v1, static_cast<T>(0)))) < epsilon;
-		}
-	};
-
-	template<typename T, qualifier Q>
-	struct compute_areCollinear<3, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static bool call(vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, T const& epsilon)
-		{
-			return length(cross(v0, v1)) < epsilon;
-		}
-	};
-
-	template<typename T, qualifier Q>
-	struct compute_areCollinear<4, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static bool call(vec<4, T, Q> const& v0, vec<4, T, Q> const& v1, T const& epsilon)
-		{
-			return length(cross(vec<3, T, Q>(v0), vec<3, T, Q>(v1))) < epsilon;
-		}
-	};
-
-	template<length_t L, typename T, qualifier Q>
-	struct compute_isCompNull{};
-
-	template<typename T, qualifier Q>
-	struct compute_isCompNull<2, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static vec<2, bool, Q> call(vec<2, T, Q> const& v, T const& epsilon)
-		{
-			return vec<2, bool, Q>(
-				(abs(v.x) < epsilon),
-				(abs(v.y) < epsilon));
-		}
-	};
-
-	template<typename T, qualifier Q>
-	struct compute_isCompNull<3, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static vec<3, bool, Q> call(vec<3, T, Q> const& v, T const& epsilon)
-		{
-			return vec<3, bool, Q>(
-				(abs(v.x) < epsilon),
-				(abs(v.y) < epsilon),
-				(abs(v.z) < epsilon));
-		}
-	};
-
-	template<typename T, qualifier Q>
-	struct compute_isCompNull<4, T, Q>
-	{
-		GLM_FUNC_QUALIFIER static vec<4, bool, Q> call(vec<4, T, Q> const& v, T const& epsilon)
-		{
-			return vec<4, bool, Q>(
-				(abs(v.x) < epsilon),
-				(abs(v.y) < epsilon),
-				(abs(v.z) < epsilon),
-				(abs(v.w) < epsilon));
-		}
-	};
-
-}//namespace detail
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool areCollinear(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areCollinear' only accept floating-point inputs");
-
-		return detail::compute_areCollinear<L, T, Q>::call(v0, v1, epsilon);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool areOrthogonal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'areOrthogonal' only accept floating-point inputs");
-
-		return abs(dot(v0, v1)) <= max(
-			static_cast<T>(1),
-			length(v0)) * max(static_cast<T>(1), length(v1)) * epsilon;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNormalized(vec<L, T, Q> const& v, T const& epsilon)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNormalized' only accept floating-point inputs");
-
-		return abs(length(v) - static_cast<T>(1)) <= static_cast<T>(2) * epsilon;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool isNull(vec<L, T, Q> const& v, T const& epsilon)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isNull' only accept floating-point inputs");
-
-		return length(v) <= epsilon;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, bool, Q> isCompNull(vec<L, T, Q> const& v, T const& epsilon)
-	{
-		GLM_STATIC_ASSERT(std::numeric_limits<T>::is_iec559, "'isCompNull' only accept floating-point inputs");
-
-		return detail::compute_isCompNull<L, T, Q>::call(v, epsilon);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon)
-	{
-		return vec<2, bool, Q>(
-			abs(v.x) < epsilon,
-			abs(v.y) < epsilon);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon)
-	{
-		return vec<3, bool, Q>(
-			abs(v.x) < epsilon,
-			abs(v.y) < epsilon,
-			abs(v.z) < epsilon);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon)
-	{
-		return vec<4, bool, Q>(
-			abs(v.x) < epsilon,
-			abs(v.y) < epsilon,
-			abs(v.z) < epsilon,
-			abs(v.w) < epsilon);
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER bool areOrthonormal(vec<L, T, Q> const& v0, vec<L, T, Q> const& v1, T const& epsilon)
-	{
-		return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon);
-	}
-
-}//namespace glm
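Likewise, a short sketch of the deleted vector_query predicates (each takes an explicit epsilon, and the static asserts above restrict them to floating-point vectors):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/vector_query.hpp>

    glm::vec3 u(1.0f, 0.0f, 0.0f);
    glm::vec3 v(2.0f, 0.0f, 0.0f);
    bool collinear = glm::areCollinear(u, v, 0.0001f); // true: v == 2 * u
    bool unitLen   = glm::isNormalized(u, 0.0001f);    // true: |u| == 1
    bool zeroVec   = glm::isNull(u - u, 0.0001f);      // true: zero length
    bool ortho     = glm::areOrthogonal(u, glm::vec3(0.0f, 1.0f, 0.0f), 0.0001f); // true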
GLM_FUNC_QUALIFIER vec isCompNull(vec const& v, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isCompNull' only accept floating-point inputs"); - - return detail::compute_isCompNull::call(v, epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon) - { - return vec<2, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon) - { - return vec<3, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon, - abs(v.z) < epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon) - { - return vec<4, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon, - abs(v.z) < epsilon, - abs(v.w) < epsilon); - } - - template - GLM_FUNC_QUALIFIER bool areOrthonormal(vec const& v0, vec const& v1, T const& epsilon) - { - return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon); - } - -}//namespace glm diff --git a/third_party/glm/gtx/wrap.hpp b/third_party/glm/gtx/wrap.hpp deleted file mode 100755 index 02c5196..0000000 --- a/third_party/glm/gtx/wrap.hpp +++ /dev/null @@ -1,55 +0,0 @@ -/// @ref gtx_wrap -/// @file glm/gtx/wrap.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_wrap GLM_GTX_wrap -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Wrapping mode of texture coordinates. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_wrap extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_wrap - /// @{ - - /// Simulate GL_CLAMP OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType clamp(genType const& Texcoord); - - /// Simulate GL_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType repeat(genType const& Texcoord); - - /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord); - - /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. 
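The gtx_vector_query predicates removed above are all epsilon-based checks. A small sketch of the expected answers against an upstream GLM (sample vectors and epsilon are illustrative):

	#define GLM_ENABLE_EXPERIMENTAL
	#include <glm/glm.hpp>
	#include <glm/gtx/vector_query.hpp>
	#include <cstdio>

	int main()
	{
		float const eps = 0.0001f;
		glm::vec3 const a(1.0f, 0.0f, 0.0f);
		glm::vec3 const b(2.0f, 0.0f, 0.0f);
		glm::vec3 const c(0.0f, 1.0f, 0.0f);

		std::printf("collinear:  %d\n", glm::areCollinear(a, b, eps));  // 1, same direction
		std::printf("orthogonal: %d\n", glm::areOrthogonal(a, c, eps)); // 1, dot == 0
		std::printf("normalized: %d\n", glm::isNormalized(b, eps));     // 0, |b| == 2
		return 0;
	}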
diff --git a/third_party/glm/gtx/wrap.hpp b/third_party/glm/gtx/wrap.hpp
deleted file mode 100755
index 02c5196..0000000
--- a/third_party/glm/gtx/wrap.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/// @ref gtx_wrap
-/// @file glm/gtx/wrap.hpp
-///
-/// @see core (dependence)
-///
-/// @defgroup gtx_wrap GLM_GTX_wrap
-/// @ingroup gtx
-///
-/// Include <glm/gtx/wrap.hpp> to use the features of this extension.
-///
-/// Wrapping mode of texture coordinates.
-
-#pragma once
-
-// Dependency:
-#include "../glm.hpp"
-#include "../gtc/vec1.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	ifndef GLM_ENABLE_EXPERIMENTAL
-#		pragma message("GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
-#	else
-#		pragma message("GLM: GLM_GTX_wrap extension included")
-#	endif
-#endif
-
-namespace glm
-{
-	/// @addtogroup gtx_wrap
-	/// @{
-
-	/// Simulate GL_CLAMP OpenGL wrap mode
-	/// @see gtx_wrap extension.
-	template<typename genType>
-	GLM_FUNC_DECL genType clamp(genType const& Texcoord);
-
-	/// Simulate GL_REPEAT OpenGL wrap mode
-	/// @see gtx_wrap extension.
-	template<typename genType>
-	GLM_FUNC_DECL genType repeat(genType const& Texcoord);
-
-	/// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode
-	/// @see gtx_wrap extension.
-	template<typename genType>
-	GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord);
-
-	/// Simulate GL_MIRROR_REPEAT OpenGL wrap mode
-	/// @see gtx_wrap extension.
-	template<typename genType>
-	GLM_FUNC_DECL genType mirrorRepeat(genType const& Texcoord);
-
-	/// @}
-}// namespace glm
-
-#include "wrap.inl"
diff --git a/third_party/glm/gtx/wrap.inl b/third_party/glm/gtx/wrap.inl
deleted file mode 100755
index 409a316..0000000
--- a/third_party/glm/gtx/wrap.inl
+++ /dev/null
@@ -1,57 +0,0 @@
-/// @ref gtx_wrap
-
-namespace glm
-{
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> clamp(vec<L, T, Q> const& Texcoord)
-	{
-		return glm::clamp(Texcoord, vec<L, T, Q>(0), vec<L, T, Q>(1));
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType clamp(genType const& Texcoord)
-	{
-		return clamp(vec<1, genType, defaultp>(Texcoord)).x;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> repeat(vec<L, T, Q> const& Texcoord)
-	{
-		return glm::fract(Texcoord);
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType repeat(genType const& Texcoord)
-	{
-		return repeat(vec<1, genType, defaultp>(Texcoord)).x;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorClamp(vec<L, T, Q> const& Texcoord)
-	{
-		return glm::fract(glm::abs(Texcoord));
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType mirrorClamp(genType const& Texcoord)
-	{
-		return mirrorClamp(vec<1, genType, defaultp>(Texcoord)).x;
-	}
-
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<L, T, Q> mirrorRepeat(vec<L, T, Q> const& Texcoord)
-	{
-		vec<L, T, Q> const Abs = glm::abs(Texcoord);
-		vec<L, T, Q> const Clamp = glm::mod(glm::floor(Abs), vec<L, T, Q>(2));
-		vec<L, T, Q> const Floor = glm::floor(Abs);
-		vec<L, T, Q> const Rest = Abs - Floor;
-		vec<L, T, Q> const Mirror = Clamp + Rest;
-		return mix(Rest, vec<L, T, Q>(1) - Rest, glm::greaterThanEqual(Mirror, vec<L, T, Q>(1)));
-	}
-
-	template<typename genType>
-	GLM_FUNC_QUALIFIER genType mirrorRepeat(genType const& Texcoord)
-	{
-		return mirrorRepeat(vec<1, genType, defaultp>(Texcoord)).x;
-	}
-}//namespace glm
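These wrap helpers mirror the GL texture wrap modes on plain coordinates. A sketch of the expected values for a 1.25 texture coordinate (illustrative, against an upstream GLM):

	#define GLM_ENABLE_EXPERIMENTAL
	#include <glm/glm.hpp>
	#include <glm/gtx/wrap.hpp>
	#include <cstdio>

	int main()
	{
		// repeat() keeps the fractional part; mirrorRepeat() reflects every
		// other period, matching GL_REPEAT / GL_MIRRORED_REPEAT.
		std::printf("repeat(1.25)       = %f\n", glm::repeat(1.25f));       // 0.25
		std::printf("mirrorRepeat(1.25) = %f\n", glm::mirrorRepeat(1.25f)); // 0.75
		std::printf("clamp(1.25)        = %f\n", glm::clamp(1.25f));        // 1.0
		return 0;
	}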
diff --git a/third_party/glm/integer.hpp b/third_party/glm/integer.hpp
deleted file mode 100755
index 8817db3..0000000
--- a/third_party/glm/integer.hpp
+++ /dev/null
@@ -1,212 +0,0 @@
-/// @ref core
-/// @file glm/integer.hpp
-///
-/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-///
-/// @defgroup core_func_integer Integer functions
-/// @ingroup core
-///
-/// Provides GLSL functions on integer types
-///
-/// These all operate component-wise. The description is per component.
-/// The notation [a, b] means the set of bits from bit-number a through bit-number
-/// b, inclusive. The lowest-order bit is bit 0.
-///
-/// Include <glm/integer.hpp> to use these core features.
-
-#pragma once
-
-#include "detail/qualifier.hpp"
-#include "common.hpp"
-#include "vector_relational.hpp"
-
-namespace glm
-{
-	/// @addtogroup core_func_integer
-	/// @{
-
-	/// Adds 32-bit unsigned integer x and y, returning the sum
-	/// modulo pow(2, 32). The value carry is set to 0 if the sum was
-	/// less than pow(2, 32), or to 1 otherwise.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	///
-	/// @see GLSL uaddCarry man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL vec<L, uint, Q> uaddCarry(
-		vec<L, uint, Q> const& x,
-		vec<L, uint, Q> const& y,
-		vec<L, uint, Q> & carry);
-
-	/// Subtracts the 32-bit unsigned integer y from x, returning
-	/// the difference if non-negative, or pow(2, 32) plus the difference
-	/// otherwise. The value borrow is set to 0 if x >= y, or to 1 otherwise.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	///
-	/// @see GLSL usubBorrow man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL vec<L, uint, Q> usubBorrow(
-		vec<L, uint, Q> const& x,
-		vec<L, uint, Q> const& y,
-		vec<L, uint, Q> & borrow);
-
-	/// Multiplies 32-bit integers x and y, producing a 64-bit
-	/// result. The 32 least-significant bits are returned in lsb.
-	/// The 32 most-significant bits are returned in msb.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	///
-	/// @see GLSL umulExtended man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL void umulExtended(
-		vec<L, uint, Q> const& x,
-		vec<L, uint, Q> const& y,
-		vec<L, uint, Q> & msb,
-		vec<L, uint, Q> & lsb);
-
-	/// Multiplies 32-bit integers x and y, producing a 64-bit
-	/// result. The 32 least-significant bits are returned in lsb.
-	/// The 32 most-significant bits are returned in msb.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	///
-	/// @see GLSL imulExtended man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, qualifier Q>
-	GLM_FUNC_DECL void imulExtended(
-		vec<L, int, Q> const& x,
-		vec<L, int, Q> const& y,
-		vec<L, int, Q> & msb,
-		vec<L, int, Q> & lsb);
-
-	/// Extracts bits [offset, offset + bits - 1] from value,
-	/// returning them in the least significant bits of the result.
-	/// For unsigned data types, the most significant bits of the
-	/// result will be set to zero. For signed data types, the
-	/// most significant bits will be set to the value of bit offset + base - 1.
-	///
-	/// If bits is zero, the result will be zero. The result will be
-	/// undefined if offset or bits is negative, or if the sum of
-	/// offset and bits is greater than the number of bits used
-	/// to store the operand.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar types.
-	///
-	/// @see GLSL bitfieldExtract man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> bitfieldExtract(
-		vec<L, T, Q> const& Value,
-		int Offset,
-		int Bits);
-
-	/// Returns the insertion the bits least-significant bits of insert into base.
-	///
-	/// The result will have bits [offset, offset + bits - 1] taken
-	/// from bits [0, bits - 1] of insert, and all other bits taken
-	/// directly from the corresponding bits of base. If bits is
-	/// zero, the result will simply be base. The result will be
-	/// undefined if offset or bits is negative, or if the sum of
-	/// offset and bits is greater than the number of bits used to
-	/// store the operand.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar or vector types.
-	///
-	/// @see GLSL bitfieldInsert man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> bitfieldInsert(
-		vec<L, T, Q> const& Base,
-		vec<L, T, Q> const& Insert,
-		int Offset,
-		int Bits);
-
-	/// Returns the reversal of the bits of value.
-	/// The bit numbered n of the result will be taken from bit (bits - 1) - n of value,
-	/// where bits is the total number of bits used to represent value.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar or vector types.
-	///
-	/// @see GLSL bitfieldReverse man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v);
-
-	/// Returns the number of bits set to 1 in the binary representation of value.
-	///
-	/// @tparam genType Signed or unsigned integer scalar or vector types.
-	///
-	/// @see GLSL bitCount man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<typename genType>
-	GLM_FUNC_DECL int bitCount(genType v);
-
-	/// Returns the number of bits set to 1 in the binary representation of value.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar or vector types.
-	///
-	/// @see GLSL bitCount man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, int, Q> bitCount(vec<L, T, Q> const& v);
-
-	/// Returns the bit number of the least significant bit set to
-	/// 1 in the binary representation of value.
-	/// If value is zero, -1 will be returned.
-	///
-	/// @tparam genIUType Signed or unsigned integer scalar types.
-	///
-	/// @see GLSL findLSB man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<typename genIUType>
-	GLM_FUNC_DECL int findLSB(genIUType x);
-
-	/// Returns the bit number of the least significant bit set to
-	/// 1 in the binary representation of value.
-	/// If value is zero, -1 will be returned.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar types.
-	///
-	/// @see GLSL findLSB man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, int, Q> findLSB(vec<L, T, Q> const& v);
-
-	/// Returns the bit number of the most significant bit in the binary representation of value.
-	/// For positive integers, the result will be the bit number of the most significant bit set to 1.
-	/// For negative integers, the result will be the bit number of the most significant
-	/// bit set to 0. For a value of zero or negative one, -1 will be returned.
-	///
-	/// @tparam genIUType Signed or unsigned integer scalar types.
-	///
-	/// @see GLSL findMSB man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<typename genIUType>
-	GLM_FUNC_DECL int findMSB(genIUType x);
-
-	/// Returns the bit number of the most significant bit in the binary representation of value.
-	/// For positive integers, the result will be the bit number of the most significant bit set to 1.
-	/// For negative integers, the result will be the bit number of the most significant
-	/// bit set to 0. For a value of zero or negative one, -1 will be returned.
-	///
-	/// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector.
-	/// @tparam T Signed or unsigned integer scalar types.
-	///
-	/// @see GLSL findMSB man page
-	/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions
-	template<length_t L, typename T, qualifier Q>
-	GLM_FUNC_DECL vec<L, int, Q> findMSB(vec<L, T, Q> const& v);
-
-	/// @}
-}//namespace glm
-
-#include "detail/func_integer.inl"
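The core integer functions above map one-to-one onto the GLSL 4.20 built-ins. A sketch of the bit-query and bitfield behavior on concrete values (against an upstream GLM; the sample values are illustrative):

	#include <glm/glm.hpp>
	#include <cstdio>

	int main()
	{
		std::printf("bitCount(0xF0) = %d\n", glm::bitCount(0xF0)); // 4
		std::printf("findLSB(0xF0)  = %d\n", glm::findLSB(0xF0));  // 4, lowest set bit
		std::printf("findMSB(0xF0)  = %d\n", glm::findMSB(0xF0));  // 7, highest set bit

		// Vector overload: extract bits [4, 7] from each component.
		glm::uvec2 const v(0xF0u, 0xABCDu);
		glm::uvec2 const r = glm::bitfieldExtract(v, 4, 4);
		std::printf("extracted: 0x%X 0x%X\n", r.x, r.y);           // 0xF 0xC
		return 0;
	}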
diff --git a/third_party/glm/mat2x2.hpp b/third_party/glm/mat2x2.hpp
deleted file mode 100755
index 96bec96..0000000
--- a/third_party/glm/mat2x2.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat2x2.hpp
-
-#pragma once
-#include "./ext/matrix_double2x2.hpp"
-#include "./ext/matrix_double2x2_precision.hpp"
-#include "./ext/matrix_float2x2.hpp"
-#include "./ext/matrix_float2x2_precision.hpp"
-
diff --git a/third_party/glm/mat2x3.hpp b/third_party/glm/mat2x3.hpp
deleted file mode 100755
index d68dc25..0000000
--- a/third_party/glm/mat2x3.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat2x3.hpp
-
-#pragma once
-#include "./ext/matrix_double2x3.hpp"
-#include "./ext/matrix_double2x3_precision.hpp"
-#include "./ext/matrix_float2x3.hpp"
-#include "./ext/matrix_float2x3_precision.hpp"
-
diff --git a/third_party/glm/mat2x4.hpp b/third_party/glm/mat2x4.hpp
deleted file mode 100755
index b04b738..0000000
--- a/third_party/glm/mat2x4.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat2x4.hpp
-
-#pragma once
-#include "./ext/matrix_double2x4.hpp"
-#include "./ext/matrix_double2x4_precision.hpp"
-#include "./ext/matrix_float2x4.hpp"
-#include "./ext/matrix_float2x4_precision.hpp"
-
diff --git a/third_party/glm/mat3x2.hpp b/third_party/glm/mat3x2.hpp
deleted file mode 100755
index c853153..0000000
--- a/third_party/glm/mat3x2.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat3x2.hpp
-
-#pragma once
-#include "./ext/matrix_double3x2.hpp"
-#include "./ext/matrix_double3x2_precision.hpp"
-#include "./ext/matrix_float3x2.hpp"
-#include "./ext/matrix_float3x2_precision.hpp"
-
diff --git a/third_party/glm/mat3x3.hpp b/third_party/glm/mat3x3.hpp
deleted file mode 100755
index fd4fa31..0000000
--- a/third_party/glm/mat3x3.hpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/// @ref core
-/// @file glm/mat3x3.hpp
-
-#pragma once
-#include "./ext/matrix_double3x3.hpp"
-#include "./ext/matrix_double3x3_precision.hpp"
-#include "./ext/matrix_float3x3.hpp"
-#include "./ext/matrix_float3x3_precision.hpp"
diff --git a/third_party/glm/mat3x4.hpp b/third_party/glm/mat3x4.hpp
deleted file mode 100755
index 6342bf5..0000000
--- a/third_party/glm/mat3x4.hpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/// @ref core
-/// @file glm/mat3x4.hpp
-
-#pragma once
-#include "./ext/matrix_double3x4.hpp"
-#include "./ext/matrix_double3x4_precision.hpp"
-#include "./ext/matrix_float3x4.hpp"
-#include "./ext/matrix_float3x4_precision.hpp"
diff --git a/third_party/glm/mat4x2.hpp b/third_party/glm/mat4x2.hpp
deleted file mode 100755
index e013e46..0000000
--- a/third_party/glm/mat4x2.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat4x2.hpp
-
-#pragma once
-#include "./ext/matrix_double4x2.hpp"
-#include "./ext/matrix_double4x2_precision.hpp"
-#include "./ext/matrix_float4x2.hpp"
-#include "./ext/matrix_float4x2_precision.hpp"
-
diff --git a/third_party/glm/mat4x3.hpp b/third_party/glm/mat4x3.hpp
deleted file mode 100755
index 205725a..0000000
--- a/third_party/glm/mat4x3.hpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/// @ref core
-/// @file glm/mat4x3.hpp
-
-#pragma once
-#include "./ext/matrix_double4x3.hpp"
-#include "./ext/matrix_double4x3_precision.hpp"
-#include "./ext/matrix_float4x3.hpp"
-#include "./ext/matrix_float4x3_precision.hpp"
diff --git a/third_party/glm/mat4x4.hpp b/third_party/glm/mat4x4.hpp
deleted file mode 100755
index 3515f7f..0000000
--- a/third_party/glm/mat4x4.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref core
-/// @file glm/mat4x4.hpp
-
-#pragma once
-#include "./ext/matrix_double4x4.hpp"
-#include "./ext/matrix_double4x4_precision.hpp"
-#include "./ext/matrix_float4x4.hpp"
-#include "./ext/matrix_float4x4_precision.hpp"
-
diff --git a/third_party/glm/matrix.hpp b/third_party/glm/matrix.hpp
deleted file mode 100755
index 6badf53..0000000
--- a/third_party/glm/matrix.hpp
+++ /dev/null
@@ -1,161 +0,0 @@
-/// @ref core
-/// @file glm/matrix.hpp
-///
-/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-///
-/// @defgroup core_func_matrix Matrix functions
-/// @ingroup core
-///
-/// Provides GLSL matrix functions.
-///
-/// Include <glm/matrix.hpp> to use these core features.
-
-#pragma once
-
-// Dependencies
-#include "detail/qualifier.hpp"
-#include "detail/setup.hpp"
-#include "vec2.hpp"
-#include "vec3.hpp"
-#include "vec4.hpp"
-#include "mat2x2.hpp"
-#include "mat2x3.hpp"
-#include "mat2x4.hpp"
-#include "mat3x2.hpp"
-#include "mat3x3.hpp"
-#include "mat3x4.hpp"
-#include "mat4x2.hpp"
-#include "mat4x3.hpp"
-#include "mat4x4.hpp"
-
-namespace glm {
-namespace detail
-{
-	template<length_t C, length_t R, typename T, qualifier Q>
-	struct outerProduct_trait{};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<2, 2, T, Q>
-	{
-		typedef mat<2, 2, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<2, 3, T, Q>
-	{
-		typedef mat<3, 2, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<2, 4, T, Q>
-	{
-		typedef mat<4, 2, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<3, 2, T, Q>
-	{
-		typedef mat<2, 3, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<3, 3, T, Q>
-	{
-		typedef mat<3, 3, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<3, 4, T, Q>
-	{
-		typedef mat<4, 3, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<4, 2, T, Q>
-	{
-		typedef mat<2, 4, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<4, 3, T, Q>
-	{
-		typedef mat<3, 4, T, Q> type;
-	};
-
-	template<typename T, qualifier Q>
-	struct outerProduct_trait<4, 4, T, Q>
-	{
-		typedef mat<4, 4, T, Q> type;
-	};
-}//namespace detail
-
-	/// @addtogroup core_func_matrix
-	/// @{
-
-	/// Multiply matrix x by matrix y component-wise, i.e.,
-	/// result[i][j] is the scalar product of x[i][j] and y[i][j].
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number a column
-	/// @tparam R Integer between 1 and 4 included that qualify the number a row
-	/// @tparam T Floating-point or signed integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see GLSL matrixCompMult man page
-	/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL mat<C, R, T, Q> matrixCompMult(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
-
-	/// Treats the first parameter c as a column vector
-	/// and the second parameter r as a row vector
-	/// and does a linear algebraic matrix multiply c * r.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number a column
-	/// @tparam R Integer between 1 and 4 included that qualify the number a row
-	/// @tparam T Floating-point or signed integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see GLSL outerProduct man page
-	/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL typename detail::outerProduct_trait<C, R, T, Q>::type outerProduct(vec<C, T, Q> const& c, vec<R, T, Q> const& r);
-
-	/// Returns the transposed matrix of x
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number a column
-	/// @tparam R Integer between 1 and 4 included that qualify the number a row
-	/// @tparam T Floating-point or signed integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see GLSL transpose man page
-	/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL typename mat<C, R, T, Q>::transpose_type transpose(mat<C, R, T, Q> const& x);
-
-	/// Return the determinant of a squared matrix.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number a column
-	/// @tparam R Integer between 1 and 4 included that qualify the number a row
-	/// @tparam T Floating-point or signed integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see GLSL determinant man page
-	/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL T determinant(mat<C, R, T, Q> const& m);
-
-	/// Return the inverse of a squared matrix.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number a column
-	/// @tparam R Integer between 1 and 4 included that qualify the number a row
-	/// @tparam T Floating-point or signed integer scalar types
-	/// @tparam Q Value from qualifier enum
-	///
-	/// @see GLSL inverse man page
-	/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL mat<C, R, T, Q> inverse(mat<C, R, T, Q> const& m);
-
-	/// @}
-}//namespace glm
-
-#include "detail/func_matrix.inl"
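Note the trait table above: outerProduct(vec<C>, vec<R>) yields a matrix with R columns and C rows, so a vec3 column times a vec2 row is a mat2x3. A sketch of that plus the square-matrix helpers (against an upstream GLM; values illustrative):

	#include <glm/glm.hpp>
	#include <cstdio>

	int main()
	{
		glm::vec3 const c(1.0f, 2.0f, 3.0f);
		glm::vec2 const r(4.0f, 5.0f);
		glm::mat2x3 const o = glm::outerProduct(c, r); // column j is c * r[j]
		std::printf("o[1][2] = %f\n", o[1][2]);        // 3 * 5 = 15

		glm::mat2 const m(4.0f, 3.0f, 6.0f, 3.0f);     // columns (4,3) and (6,3)
		std::printf("det = %f\n", glm::determinant(m)); // 4*3 - 6*3 = -6
		std::printf("inv[0][0] = %f\n", glm::inverse(m)[0][0]); // 3 / -6 = -0.5
		return 0;
	}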
diff --git a/third_party/glm/packing.hpp b/third_party/glm/packing.hpp
deleted file mode 100755
index ca83ac1..0000000
--- a/third_party/glm/packing.hpp
+++ /dev/null
@@ -1,173 +0,0 @@
-/// @ref core
-/// @file glm/packing.hpp
-///
-/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-/// @see gtc_packing
-///
-/// @defgroup core_func_packing Floating-Point Pack and Unpack Functions
-/// @ingroup core
-///
-/// Provides GLSL functions to pack and unpack half, single and double-precision floating point values into more compact integer types.
-///
-/// These functions do not operate component-wise, rather as described in each case.
-///
-/// Include <glm/packing.hpp> to use these core features.
-
-#pragma once
-
-#include "./ext/vector_uint2.hpp"
-#include "./ext/vector_float2.hpp"
-#include "./ext/vector_float4.hpp"
-
-namespace glm
-{
-	/// @addtogroup core_func_packing
-	/// @{
-
-	/// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
-	/// Then, the results are packed into the returned 32-bit unsigned integer.
-	///
-	/// The conversion for component c of v to fixed point is done as follows:
-	/// packUnorm2x16: round(clamp(c, 0, +1) * 65535.0)
-	///
-	/// The first component of the vector will be written to the least significant bits of the output;
-	/// the last component will be written to the most significant bits.
-	///
-	/// @see GLSL packUnorm2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uint packUnorm2x16(vec2 const& v);
-
-	/// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
-	/// Then, the results are packed into the returned 32-bit unsigned integer.
-	///
-	/// The conversion for component c of v to fixed point is done as follows:
-	/// packSnorm2x16: round(clamp(v, -1, +1) * 32767.0)
-	///
-	/// The first component of the vector will be written to the least significant bits of the output;
-	/// the last component will be written to the most significant bits.
-	///
-	/// @see GLSL packSnorm2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uint packSnorm2x16(vec2 const& v);
-
-	/// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
-	/// Then, the results are packed into the returned 32-bit unsigned integer.
-	///
-	/// The conversion for component c of v to fixed point is done as follows:
-	/// packUnorm4x8: round(clamp(c, 0, +1) * 255.0)
-	///
-	/// The first component of the vector will be written to the least significant bits of the output;
-	/// the last component will be written to the most significant bits.
-	///
-	/// @see GLSL packUnorm4x8 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uint packUnorm4x8(vec4 const& v);
-
-	/// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values.
-	/// Then, the results are packed into the returned 32-bit unsigned integer.
-	///
-	/// The conversion for component c of v to fixed point is done as follows:
-	/// packSnorm4x8: round(clamp(c, -1, +1) * 127.0)
-	///
-	/// The first component of the vector will be written to the least significant bits of the output;
-	/// the last component will be written to the most significant bits.
-	///
-	/// @see GLSL packSnorm4x8 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uint packSnorm4x8(vec4 const& v);
-
-	/// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
-	/// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
-	///
-	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
-	/// unpackUnorm2x16: f / 65535.0
-	///
-	/// The first component of the returned vector will be extracted from the least significant bits of the input;
-	/// the last component will be extracted from the most significant bits.
-	///
-	/// @see GLSL unpackUnorm2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL vec2 unpackUnorm2x16(uint p);
-
-	/// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
-	/// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
-	///
-	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
-	/// unpackSnorm2x16: clamp(f / 32767.0, -1, +1)
-	///
-	/// The first component of the returned vector will be extracted from the least significant bits of the input;
-	/// the last component will be extracted from the most significant bits.
-	///
-	/// @see GLSL unpackSnorm2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL vec2 unpackSnorm2x16(uint p);
-
-	/// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
-	/// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
-	///
-	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
-	/// unpackUnorm4x8: f / 255.0
-	///
-	/// The first component of the returned vector will be extracted from the least significant bits of the input;
-	/// the last component will be extracted from the most significant bits.
-	///
-	/// @see GLSL unpackUnorm4x8 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL vec4 unpackUnorm4x8(uint p);
-
-	/// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers.
-	/// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector.
-	///
-	/// The conversion for unpacked fixed-point value f to floating point is done as follows:
-	/// unpackSnorm4x8: clamp(f / 127.0, -1, +1)
-	///
-	/// The first component of the returned vector will be extracted from the least significant bits of the input;
-	/// the last component will be extracted from the most significant bits.
-	///
-	/// @see GLSL unpackSnorm4x8 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL vec4 unpackSnorm4x8(uint p);
-
-	/// Returns a double-qualifier value obtained by packing the components of v into a 64-bit value.
-	/// If an IEEE 754 Inf or NaN is created, it will not signal, and the resulting floating point value is unspecified.
-	/// Otherwise, the bit- level representation of v is preserved.
-	/// The first vector component specifies the 32 least significant bits;
-	/// the second component specifies the 32 most significant bits.
-	///
-	/// @see GLSL packDouble2x32 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL double packDouble2x32(uvec2 const& v);
-
-	/// Returns a two-component unsigned integer vector representation of v.
-	/// The bit-level representation of v is preserved.
-	/// The first component of the vector contains the 32 least significant bits of the double;
-	/// the second component consists the 32 most significant bits.
-	///
-	/// @see GLSL unpackDouble2x32 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uvec2 unpackDouble2x32(double v);
-
-	/// Returns an unsigned integer obtained by converting the components of a two-component floating-point vector
-	/// to the 16-bit floating-point representation found in the OpenGL Specification,
-	/// and then packing these two 16- bit integers into a 32-bit unsigned integer.
-	/// The first vector component specifies the 16 least-significant bits of the result;
-	/// the second component specifies the 16 most-significant bits.
-	///
-	/// @see GLSL packHalf2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL uint packHalf2x16(vec2 const& v);
-
-	/// Returns a two-component floating-point vector with components obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit values,
-	/// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification,
-	/// and converting them to 32-bit floating-point values.
-	/// The first component of the vector is obtained from the 16 least-significant bits of v;
-	/// the second component is obtained from the 16 most-significant bits of v.
-	///
-	/// @see GLSL unpackHalf2x16 man page
-	/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions
-	GLM_FUNC_DECL vec2 unpackHalf2x16(uint v);
-
-	/// @}
-}//namespace glm
-
-#include "detail/func_packing.inl"
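A pack/unpack round trip makes the fixed-point conversion above concrete. A sketch against an upstream GLM (values illustrative):

	#include <glm/glm.hpp>
	#include <cstdio>

	int main()
	{
		// Two normalized floats into one 32-bit word and back;
		// quantization error is at most 1/65535 per component.
		glm::uint const p = glm::packUnorm2x16(glm::vec2(0.25f, 0.75f));
		glm::vec2 const v = glm::unpackUnorm2x16(p);
		std::printf("0x%08X -> %f %f\n", p, v.x, v.y);

		// Half packing keeps roughly three decimal digits per component.
		glm::uint const h = glm::packHalf2x16(glm::vec2(1.5f, -2.0f));
		glm::vec2 const u = glm::unpackHalf2x16(h);
		std::printf("0x%08X -> %f %f\n", h, u.x, u.y);
		return 0;
	}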
diff --git a/third_party/glm/simd/common.h b/third_party/glm/simd/common.h
deleted file mode 100755
index 9b017cb..0000000
--- a/third_party/glm/simd/common.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/// @ref simd
-/// @file glm/simd/common.h
-
-#pragma once
-
-#include "platform.h"
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_add_ps(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_add_ss(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_sub_ps(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_sub_ss(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_mul_ps(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_mul_ss(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_div_ps(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return _mm_div_ss(a, b);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b)
-{
-	return glm_vec4_mul(a, _mm_rcp_ps(b));
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a)
-{
-#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
-		return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0));
-#	else
-		return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
-{
-#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
-		return _mm_fmadd_ss(a, b, c);
-#	else
-		return _mm_add_ss(_mm_mul_ss(a, b), c);
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
-{
-#	if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
-		return _mm_fmadd_ps(a, b, c);
-#	else
-		return glm_vec4_add(glm_vec4_mul(a, b), c);
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x)
-{
-	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
-}
-
-GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x)
-{
-#	if GLM_ARCH & GLM_ARCH_SSSE3_BIT
-		return _mm_sign_epi32(x, x);
-#	else
-		glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31);
-		glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0);
-		glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0);
-		return sub0;
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x)
-{
-	glm_vec4 const zro0 = _mm_setzero_ps();
-	glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0);
-	glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0);
-	glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f));
-	glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f));
-	glm_vec4 const or0 = _mm_or_ps(and0, and1);
-	return or0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x)
-{
-#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
-		return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
-#	else
-		glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
-		glm_vec4 const and0 = _mm_and_ps(sgn0, x);
-		glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
-		glm_vec4 const add0 = glm_vec4_add(x, or0);
-		glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
-		return sub0;
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x)
-{
-#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
-		return _mm_floor_ps(x);
-#	else
-		glm_vec4 const rnd0 = glm_vec4_round(x);
-		glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0);
-		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
-		glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0);
-		return sub0;
-#	endif
-}
-
-/* trunc TODO
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x)
-{
-	return glm_vec4();
-}
-*/
-
-//roundEven
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x)
-{
-	glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
-	glm_vec4 const and0 = _mm_and_ps(sgn0, x);
-	glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
-	glm_vec4 const add0 = glm_vec4_add(x, or0);
-	glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
-	return sub0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x)
-{
-#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
-		return _mm_ceil_ps(x);
-#	else
-		glm_vec4 const rnd0 = glm_vec4_round(x);
-		glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0);
-		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
-		glm_vec4 const add0 = glm_vec4_add(rnd0, and0);
-		return add0;
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x)
-{
-	glm_vec4 const flr0 = glm_vec4_floor(x);
-	glm_vec4 const sub0 = glm_vec4_sub(x, flr0);
-	return sub0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y)
-{
-	glm_vec4 const div0 = glm_vec4_div(x, y);
-	glm_vec4 const flr0 = glm_vec4_floor(div0);
-	glm_vec4 const mul0 = glm_vec4_mul(y, flr0);
-	glm_vec4 const sub0 = glm_vec4_sub(x, mul0);
-	return sub0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal)
-{
-	glm_vec4 const min0 = _mm_min_ps(v, maxVal);
-	glm_vec4 const max0 = _mm_max_ps(min0, minVal);
-	return max0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a)
-{
-	glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a);
-	glm_vec4 const mul0 = glm_vec4_mul(v1, sub0);
-	glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0);
-	return mad0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x)
-{
-	glm_vec4 const cmp = _mm_cmple_ps(x, edge);
-	return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_setzero_ps();
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x)
-{
-	glm_vec4 const sub0 = glm_vec4_sub(x, edge0);
-	glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0);
-	glm_vec4 const div0 = glm_vec4_sub(sub0, sub1);
-	glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
-	glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0);
-	glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0);
-	glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0);
-	glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2);
-	return mul2;
-}
-
-// Agner Fog method
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x)
-{
-	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
-	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
-	glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000));			// exponent mask
-	glm_ivec4 const t4 = _mm_and_si128(t2, t3);						// exponent
-	glm_ivec4 const t5 = _mm_andnot_si128(t3, t2);					// fraction
-	glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4);
-	glm_ivec4 const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128());
-	glm_ivec4 const And = _mm_and_si128(Equal, Nequal);
-	return _mm_castsi128_ps(And);									// exponent = all 1s and fraction != 0
-}
-
-// Agner Fog method
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x)
-{
-	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
-	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
-	return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000))));	// exponent is all 1s, fraction is 0
-}
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
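The pre-SSE4.1 glm_vec4_round() fallback above rounds by exploiting float precision: adding and then subtracting 2^23 (8388608.0f) forces the FPU to drop the fractional bits, with the input's sign OR-ed into the magic constant so negatives round symmetrically. A scalar model of the same trick, assuming the default round-to-nearest-even FPU mode (as the SIMD code does):

	#include <cmath>
	#include <cstdio>

	static float round_magic(float x)
	{
		float const magic = 8388608.0f;            // 2^23: ulp is exactly 1.0 here
		float const m = std::copysign(magic, x);   // carry x's sign, like and0/or0
		return (x + m) - m;                        // the fraction is squeezed out
	}

	int main()
	{
		// Note the ties-to-even behavior: 2.5 rounds to 2.0, not 3.0.
		std::printf("%f %f\n", round_magic(2.5f), round_magic(-1.3f)); // 2.0 -1.0
		return 0;
	}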
diff --git a/third_party/glm/simd/exponential.h b/third_party/glm/simd/exponential.h
deleted file mode 100755
index bc351d0..0000000
--- a/third_party/glm/simd/exponential.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/// @ref simd
-/// @file glm/simd/experimental.h
-
-#pragma once
-
-#include "platform.h"
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x)
-{
-	return _mm_mul_ss(_mm_rsqrt_ss(x), x);
-}
-
-GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x)
-{
-	return _mm_mul_ps(_mm_rsqrt_ps(x), x);
-}
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/third_party/glm/simd/geometric.h b/third_party/glm/simd/geometric.h
deleted file mode 100755
index 07d7cbc..0000000
--- a/third_party/glm/simd/geometric.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/// @ref simd
-/// @file glm/simd/geometric.h
-
-#pragma once
-
-#include "common.h"
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2);
-GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2);
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x)
-{
-	glm_vec4 const dot0 = glm_vec4_dot(x, x);
-	glm_vec4 const sqt0 = _mm_sqrt_ps(dot0);
-	return sqt0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1)
-{
-	glm_vec4 const sub0 = _mm_sub_ps(p0, p1);
-	glm_vec4 const len0 = glm_vec4_length(sub0);
-	return len0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2)
-{
-#	if GLM_ARCH & GLM_ARCH_AVX_BIT
-		return _mm_dp_ps(v1, v2, 0xff);
-#	elif GLM_ARCH & GLM_ARCH_SSE3_BIT
-		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
-		glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0);
-		glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0);
-		return hadd1;
-#	else
-		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
-		glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
-		glm_vec4 const add0 = _mm_add_ps(mul0, swp0);
-		glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
-		glm_vec4 const add1 = _mm_add_ps(add0, swp1);
-		return add1;
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2)
-{
-#	if GLM_ARCH & GLM_ARCH_AVX_BIT
-		return _mm_dp_ps(v1, v2, 0xff);
-#	elif GLM_ARCH & GLM_ARCH_SSE3_BIT
-		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
-		glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0);
-		glm_vec4 const had1 = _mm_hadd_ps(had0, had0);
-		return had1;
-#	else
-		glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
-		glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0);
-		glm_vec4 const add0 = _mm_add_ps(mov0, mul0);
-		glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1);
-		glm_vec4 const add1 = _mm_add_ss(add0, swp1);
-		return add1;
-#	endif
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2)
-{
-	glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1));
-	glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2));
-	glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1));
-	glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2));
-	glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3);
-	glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2);
-	glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1);
-	return sub0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v)
-{
-	glm_vec4 const dot0 = glm_vec4_dot(v, v);
-	glm_vec4 const isr0 = _mm_rsqrt_ps(dot0);
-	glm_vec4 const mul0 = _mm_mul_ps(v, isr0);
-	return mul0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref)
-{
-	glm_vec4 const dot0 = glm_vec4_dot(Nref, I);
-	glm_vec4 const sgn0 = glm_vec4_sign(dot0);
-	glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f));
-	glm_vec4 const mul1 = _mm_mul_ps(N, mul0);
-	return mul1;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N)
-{
-	glm_vec4 const dot0 = glm_vec4_dot(N, I);
-	glm_vec4 const mul0 = _mm_mul_ps(N, dot0);
-	glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f));
-	glm_vec4 const sub0 = _mm_sub_ps(I, mul1);
-	return sub0;
-}
-
-GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta)
-{
-	glm_vec4 const dot0 = glm_vec4_dot(N, I);
-	glm_vec4 const mul0 = _mm_mul_ps(eta, eta);
-	glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0);
-	glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul0);
-	glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1);
-	glm_vec4 const mul2 = _mm_mul_ps(sub0, sub1);
-
-	if(_mm_movemask_ps(_mm_cmplt_ss(mul2, _mm_set1_ps(0.0f))) == 0)
-		return _mm_set1_ps(0.0f);
-
-	glm_vec4 const sqt0 = _mm_sqrt_ps(mul2);
-	glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0);
-	glm_vec4 const mul4 = _mm_mul_ps(mad0, N);
-	glm_vec4 const mul5 = _mm_mul_ps(eta, I);
-	glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4);
-
-	return sub2;
-}
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
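The SSE2 path of glm_vec4_dot() above reduces the four lane products with two shuffle/add rounds instead of extracting lanes. A scalar model of that reduction, with illustrative inputs:

	#include <cstdio>

	int main()
	{
		// mul0: lane products of (1,2,3,4) . (5,6,7,8)
		float m[4] = {1.0f * 5.0f, 2.0f * 6.0f, 3.0f * 7.0f, 4.0f * 8.0f};

		// swp0 swaps neighbors (y,x,w,z); add0 holds pairwise sums in every lane.
		float a0[4] = {m[0] + m[1], m[1] + m[0], m[2] + m[3], m[3] + m[2]};

		// swp1 reverses the lanes; the final add broadcasts the full sum.
		float const dot = a0[0] + a0[2];
		std::printf("dot = %f\n", dot); // 70
		return 0;
	}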
diff --git a/third_party/glm/simd/integer.h b/third_party/glm/simd/integer.h
deleted file mode 100755
index 9381418..0000000
--- a/third_party/glm/simd/integer.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/// @ref simd
-/// @file glm/simd/integer.h
-
-#pragma once
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x)
-{
-	glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
-	glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
-	glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
-	glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
-	glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
-
-	glm_uvec4 Reg1;
-	glm_uvec4 Reg2;
-
-	// REG1 = x;
-	// REG2 = y;
-	//Reg1 = _mm_unpacklo_epi64(x, y);
-	Reg1 = x;
-
-	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
-	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
-	Reg2 = _mm_slli_si128(Reg1, 2);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask4);
-
-	//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
-	//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
-	Reg2 = _mm_slli_si128(Reg1, 1);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask3);
-
-	//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
-	//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
-	Reg2 = _mm_slli_epi32(Reg1, 4);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask2);
-
-	//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
-	//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
-	Reg2 = _mm_slli_epi32(Reg1, 2);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask1);
-
-	//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
-	//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
-	Reg2 = _mm_slli_epi32(Reg1, 1);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask0);
-
-	//return REG1 | (REG2 << 1);
-	Reg2 = _mm_slli_epi32(Reg1, 1);
-	Reg2 = _mm_srli_si128(Reg2, 8);
-	Reg1 = _mm_or_si128(Reg1, Reg2);
-
-	return Reg1;
-}
-
-GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y)
-{
-	glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
-	glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
-	glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
-	glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
-	glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
-
-	glm_uvec4 Reg1;
-	glm_uvec4 Reg2;
-
-	// REG1 = x;
-	// REG2 = y;
-	Reg1 = _mm_unpacklo_epi64(x, y);
-
-	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
-	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
-	Reg2 = _mm_slli_si128(Reg1, 2);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask4);
-
-	//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
-	//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
-	Reg2 = _mm_slli_si128(Reg1, 1);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask3);
-
-	//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
-	//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
-	Reg2 = _mm_slli_epi32(Reg1, 4);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask2);
-
-	//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
-	//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
-	Reg2 = _mm_slli_epi32(Reg1, 2);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask1);
-
-	//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
-	//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
-	Reg2 = _mm_slli_epi32(Reg1, 1);
-	Reg1 = _mm_or_si128(Reg2, Reg1);
-	Reg1 = _mm_and_si128(Reg1, Mask0);
-
-	//return REG1 | (REG2 << 1);
-	Reg2 = _mm_slli_epi32(Reg1, 1);
-	Reg2 = _mm_srli_si128(Reg2, 8);
-	Reg1 = _mm_or_si128(Reg1, Reg2);
-
-	return Reg1;
-}
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
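glm_i128_interleave2() is the Morton-code building block: it spreads the bits of two 32-bit integers so they can be zipped into one 64-bit key. A scalar equivalent of the spread stage, following the same mask ladder the commented REG lines above describe (sample values illustrative):

	#include <cstdint>
	#include <cstdio>

	static std::uint64_t spread(std::uint64_t r)
	{
		// Each step doubles the gap between the payload bits.
		r = (r | (r << 16)) & 0x0000FFFF0000FFFFull;
		r = (r | (r << 8))  & 0x00FF00FF00FF00FFull;
		r = (r | (r << 4))  & 0x0F0F0F0F0F0F0F0Full;
		r = (r | (r << 2))  & 0x3333333333333333ull;
		r = (r | (r << 1))  & 0x5555555555555555ull;
		return r;
	}

	int main()
	{
		std::uint32_t const x = 0xB, y = 0x6;
		// x occupies the even bits, y the odd bits of the result.
		std::uint64_t const morton = spread(x) | (spread(y) << 1);
		std::printf("0x%llX\n", (unsigned long long)morton);
		return 0;
	}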
diff --git a/third_party/glm/simd/matrix.h b/third_party/glm/simd/matrix.h
deleted file mode 100755
index b6c42ea..0000000
--- a/third_party/glm/simd/matrix.h
+++ /dev/null
@@ -1,1028 +0,0 @@
-/// @ref simd
-/// @file glm/simd/matrix.h
-
-#pragma once
-
-#include "geometric.h"
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
-{
-	out[0] = _mm_mul_ps(in1[0], in2[0]);
-	out[1] = _mm_mul_ps(in1[1], in2[1]);
-	out[2] = _mm_mul_ps(in1[2], in2[2]);
-	out[3] = _mm_mul_ps(in1[3], in2[3]);
-}
-
-GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
-{
-	out[0] = _mm_add_ps(in1[0], in2[0]);
-	out[1] = _mm_add_ps(in1[1], in2[1]);
-	out[2] = _mm_add_ps(in1[2], in2[2]);
-	out[3] = _mm_add_ps(in1[3], in2[3]);
-}
-
-GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
-{
-	out[0] = _mm_sub_ps(in1[0], in2[0]);
-	out[1] = _mm_sub_ps(in1[1], in2[1]);
-	out[2] = _mm_sub_ps(in1[2], in2[2]);
-	out[3] = _mm_sub_ps(in1[3], in2[3]);
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v)
-{
-	__m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
-	__m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
-	__m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
-	__m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3));
-
-	__m128 m0 = _mm_mul_ps(m[0], v0);
-	__m128 m1 = _mm_mul_ps(m[1], v1);
-	__m128 m2 = _mm_mul_ps(m[2], v2);
-	__m128 m3 = _mm_mul_ps(m[3], v3);
-
-	__m128 a0 = _mm_add_ps(m0, m1);
-	__m128 a1 = _mm_add_ps(m2, m3);
-	__m128 a2 = _mm_add_ps(a0, a1);
-
-	return a2;
-}
-
-GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4])
-{
-	__m128 i0 = m[0];
-	__m128 i1 = m[1];
-	__m128 i2 = m[2];
-	__m128 i3 = m[3];
-
-	__m128 m0 = _mm_mul_ps(v, i0);
-	__m128 m1 = _mm_mul_ps(v, i1);
-	__m128 m2 = _mm_mul_ps(v, i2);
-	__m128 m3 = _mm_mul_ps(v, i3);
-
-	__m128 u0 = _mm_unpacklo_ps(m0, m1);
-	__m128 u1 = _mm_unpackhi_ps(m0, m1);
-	__m128 a0 = _mm_add_ps(u0, u1);
-
-	__m128 u2 = _mm_unpacklo_ps(m2, m3);
-	__m128 u3 = _mm_unpackhi_ps(m2, m3);
-	__m128 a1 = _mm_add_ps(u2, u3);
-
-	__m128 f0 = _mm_movelh_ps(a0, a1);
-	__m128 f1 = _mm_movehl_ps(a1, a0);
-	__m128 f2 = _mm_add_ps(f0, f1);
-
-	return f2;
-}
-
-GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
-{
-	{
-		__m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 m0 = _mm_mul_ps(in1[0], e0);
-		__m128 m1 = _mm_mul_ps(in1[1], e1);
-		__m128 m2 = _mm_mul_ps(in1[2], e2);
-		__m128 m3 = _mm_mul_ps(in1[3], e3);
-
-		__m128 a0 = _mm_add_ps(m0, m1);
-		__m128 a1 = _mm_add_ps(m2, m3);
-		__m128 a2 = _mm_add_ps(a0, a1);
-
-		out[0] = a2;
-	}
-
-	{
-		__m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 m0 = _mm_mul_ps(in1[0], e0);
-		__m128 m1 = _mm_mul_ps(in1[1], e1);
-		__m128 m2 = _mm_mul_ps(in1[2], e2);
-		__m128 m3 = _mm_mul_ps(in1[3], e3);
-
-		__m128 a0 = _mm_add_ps(m0, m1);
-		__m128 a1 = _mm_add_ps(m2, m3);
-		__m128 a2 = _mm_add_ps(a0, a1);
-
-		out[1] = a2;
-	}
-
-	{
-		__m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 m0 = _mm_mul_ps(in1[0], e0);
-		__m128 m1 = _mm_mul_ps(in1[1], e1);
-		__m128 m2 = _mm_mul_ps(in1[2], e2);
-		__m128 m3 = _mm_mul_ps(in1[3], e3);
-
-		__m128 a0 = _mm_add_ps(m0, m1);
-		__m128 a1 = _mm_add_ps(m2, m3);
-		__m128 a2 = _mm_add_ps(a0, a1);
-
-		out[2] = a2;
-	}
-
-	{
-		//(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3))
-		__m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 m0 = _mm_mul_ps(in1[0], e0);
-		__m128 m1 = _mm_mul_ps(in1[1], e1);
-		__m128 m2 = _mm_mul_ps(in1[2], e2);
-		__m128 m3 = _mm_mul_ps(in1[3], e3);
-
-		__m128 a0 = _mm_add_ps(m0, m1);
-		__m128 a1 = _mm_add_ps(m2, m3);
-		__m128 a2 = _mm_add_ps(a0, a1);
-
-		out[3] = a2;
-	}
-}
-
-GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4])
-{
-	__m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44);
-	__m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE);
-	__m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44);
-	__m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE);
-
-	out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88);
-	out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD);
-	out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88);
-	out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD);
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4])
-{
-	__m128 Fac0;
-	{
-		//	valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
-		//	valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
-		//	valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
-		//	valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac0 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-	__m128 Fac1;
-	{
-		//	valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
-		//	valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
-		//	valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
-		//	valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac1 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-
-	__m128 Fac2;
-	{
-		//	valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
-		//	valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
-		//	valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
-		//	valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac2 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-	__m128 Fac3;
-	{
-		//	valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
-		//	valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
-		//	valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
-		//	valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac3 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-	__m128 Fac4;
-	{
-		//	valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
-		//	valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
-		//	valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
-		//	valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac4 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-	__m128 Fac5;
-	{
-		//	valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
-		//	valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
-		//	valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
-		//	valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
-
-		__m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
-		__m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
-
-		__m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
-		__m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
-		__m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
-
-		__m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
-		__m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
-		Fac5 = _mm_sub_ps(Mul00, Mul01);
-	}
-
-	__m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
-	__m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
-
-	// m[1][0]
-	// m[0][0]
-	// m[0][0]
-	// m[0][0]
-	__m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
-	__m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
-
-	// m[1][1]
-	// m[0][1]
-	// m[0][1]
-	// m[0][1]
-	__m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
-	__m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
-
-	// m[1][2]
-	// m[0][2]
-	// m[0][2]
-	// m[0][2]
-	__m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
-	__m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
-
-	// m[1][3]
-	// m[0][3]
-	// m[0][3]
-	// m[0][3]
-	__m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
-	__m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
-
-	// col0
-	// + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
-	// - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
-	// + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
-	// - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
-	__m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
-	__m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
-	__m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
-	__m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
-	__m128 Add00 = _mm_add_ps(Sub00, Mul02);
-	__m128 Inv0 = _mm_mul_ps(SignB, Add00);
-
-	// col1
-	// - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
-	// + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
-	// - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
-	// + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
-	__m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
-	__m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
-	__m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
-	__m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
-	__m128 Add01 = _mm_add_ps(Sub01, Mul05);
-	__m128 Inv1 = _mm_mul_ps(SignA, Add01);
-
-	// col2
-	// + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
-	// - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
-	// + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
-	// - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
-	__m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
-	__m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
-	__m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
-	__m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
-	__m128 Add02 = _mm_add_ps(Sub02, Mul08);
-	__m128 Inv2 = _mm_mul_ps(SignB, Add02);
-
-	// col3
-	// - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
-	// + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
-	// - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
-	// + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
-	__m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
-	__m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
-	__m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
-	__m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
-	__m128 Add03 = _mm_add_ps(Sub03, Mul11);
-	__m128 Inv3 = _mm_mul_ps(SignA, Add03);
-
-	__m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
-	__m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
-	__m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
-
-	//	valType Determinant = m[0][0] * Inverse[0][0]
-	//						+ m[0][1] * Inverse[1][0]
-	//						+ m[0][2] * Inverse[2][0]
-	//						+ m[0][3] * Inverse[3][0];
-	__m128 Det0 = glm_vec4_dot(in[0], Row2);
-	return Det0;
-}
-
-GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4])
-{
-	// _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(
-
-	//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
-	//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
-	//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
-	//T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
-	//T SubFactor04 = m[2][0]
* m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2))); - __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0))); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //vec<4, T, Q> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0))); - __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1))); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2))); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0))); - __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3))); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4]) -{ - // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add) - - //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 MulB 
= _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2)); - __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0)); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //vec<4, T, Q> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0)); - __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1)); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2)); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0)); - __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3)); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, 
Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - 
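// Editor's note (not part of the original source): SignA and SignB apply the alternating cofactor signs of the classical adjugate. _mm_set_ps lists lanes from high to low, so in lane order the masks are:
//   SignA = { -1.0f, +1.0f, -1.0f, +1.0f }   // multiplies columns Inv1 and Inv3 below
//   SignB = { +1.0f, -1.0f, +1.0f, -1.0f }   // multiplies columns Inv0 and Inv2 below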
// m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0); - //__m128 Rcp0 = _mm_rcp_ps(Det0); - - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - 
out[3] = _mm_mul_ps(Inv3, Rcp0); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType 
SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 
Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_rcp_ps(Det0); - //__m128 Rcp0 = _mm_div_ps(one, Det0); - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - out[3] = _mm_mul_ps(Inv3, Rcp0); -} -/* -GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) -{ - float a = glm::radians(Angle); - float c = cos(a); - float s = sin(a); - - glm::vec4 AxisA(v[0], v[1], v[2], float(0)); - __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); - __m128 AxisC = detail::sse_nrm_ps(AxisB); - - __m128 Cos0 = _mm_set_ss(c); - __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Sin0 = _mm_set_ss(s); - __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); - - // vec<3, T, Q> temp = (valType(1) - c) * axis; - __m128 Temp0 = _mm_sub_ps(one, CosA); - __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); - - //Rotate[0][0] = c + temp[0] * axis[0]; - //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; - //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; - __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); - __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0)); - __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); - __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); - __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); - __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); - __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); - - //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; - //Rotate[1][1] = c + temp[1] * axis[1]; - //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; - __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); - __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); - __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); - __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); - __m128 TmpB2 = _mm_shuffle_ps(AxisC, 
AxisC, _MM_SHUFFLE(3, 0, 3, 2)); - __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); - __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); - - //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; - //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; - //Rotate[2][2] = c + temp[2] * axis[2]; - __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); - __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); - __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); - __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); - __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); - __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); - __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); - - __m128 Result[4]; - Result[0] = TmpA4; - Result[1] = TmpB4; - Result[2] = TmpC4; - Result[3] = _mm_set_ps(1, 0, 0, 0); - - //mat<4, 4, valType> Result; - //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - //Result[3] = m[3]; - //return Result; - sse_mul_ps(in, Result, out); -} -*/ -GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4]) -{ - out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0))); - out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1))); - out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2))); - out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3))); -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/neon.h b/third_party/glm/simd/neon.h deleted file mode 100755 index 6c38b06..0000000 --- a/third_party/glm/simd/neon.h +++ /dev/null @@ -1,155 +0,0 @@ -/// @ref simd_neon -/// @file glm/simd/neon.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_NEON_BIT -#include - -namespace glm { - namespace neon { - static float32x4_t dupq_lane(float32x4_t vsrc, int lane) { - switch(lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - case 0: return vdupq_laneq_f32(vsrc, 0); - case 1: return vdupq_laneq_f32(vsrc, 1); - case 2: return vdupq_laneq_f32(vsrc, 2); - case 3: return vdupq_laneq_f32(vsrc, 3); -#else - case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0)); - case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1)); - case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2)); - case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3)); -#endif - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); - } - - static float32x2_t dup_lane(float32x4_t vsrc, int lane) { - switch(lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - case 0: return vdup_laneq_f32(vsrc, 0); - case 1: return vdup_laneq_f32(vsrc, 1); - case 2: return vdup_laneq_f32(vsrc, 2); - case 3: return vdup_laneq_f32(vsrc, 3); -#else - case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0)); - case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1)); - case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2)); - case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3)); -#endif - } - assert(!"Unreachable code executed!"); - return vdup_n_f32(0.0f); - } - - static float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - switch(dlane) { - case 0: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3); - } - 
assert(!"Unreachable code executed!"); - case 1: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3); - } - assert(!"Unreachable code executed!"); - case 2: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3); - } - assert(!"Unreachable code executed!"); - case 3: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3); - } - assert(!"Unreachable code executed!"); - } -#else - - float l; - switch(slane) { - case 0: l = vgetq_lane_f32(vsrc, 0); break; - case 1: l = vgetq_lane_f32(vsrc, 1); break; - case 2: l = vgetq_lane_f32(vsrc, 2); break; - case 3: l = vgetq_lane_f32(vsrc, 3); break; - default: - assert(!"Unreachable code executed!"); - } - switch(dlane) { - case 0: return vsetq_lane_f32(l, vdst, 0); - case 1: return vsetq_lane_f32(l, vdst, 1); - case 2: return vsetq_lane_f32(l, vdst, 2); - case 3: return vsetq_lane_f32(l, vdst, 3); - } -#endif - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); - } - - static float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - switch(lane) { - case 0: return vmulq_laneq_f32(v, vlane, 0); break; - case 1: return vmulq_laneq_f32(v, vlane, 1); break; - case 2: return vmulq_laneq_f32(v, vlane, 2); break; - case 3: return vmulq_laneq_f32(v, vlane, 3); break; - default: - assert(!"Unreachable code executed!"); - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); -#else - return vmulq_f32(v, dupq_lane(vlane, lane)); -#endif - } - - static float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT -#ifdef GLM_CONFIG_FORCE_FMA -# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dup_lane(y, L))); } while(0) -#else -# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0) -#endif - - switch(lane) { - case 0: - FMADD_LANE(acc, v, vlane, 0); - return acc; - case 1: - FMADD_LANE(acc, v, vlane, 1); - return acc; - case 2: - FMADD_LANE(acc, v, vlane, 2); - return acc; - case 3: - FMADD_LANE(acc, v, vlane, 3); - return acc; - default: - assert(!"Unreachable code executed!"); - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); -# undef FMADD_LANE -#else - return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane))); -#endif - } - } //namespace neon -} // namespace glm -#endif // GLM_ARCH & GLM_ARCH_NEON_BIT diff --git a/third_party/glm/simd/packing.h b/third_party/glm/simd/packing.h deleted file mode 100755 index 609163e..0000000 --- a/third_party/glm/simd/packing.h +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref simd -/// @file glm/simd/packing.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/platform.h b/third_party/glm/simd/platform.h deleted file mode 100755 index ad25cc1..0000000 --- a/third_party/glm/simd/platform.h +++ /dev/null @@ -1,398 +0,0 @@ -#pragma once - 
-/////////////////////////////////////////////////////////////////////////////////// -// Platform - -#define GLM_PLATFORM_UNKNOWN 0x00000000 -#define GLM_PLATFORM_WINDOWS 0x00010000 -#define GLM_PLATFORM_LINUX 0x00020000 -#define GLM_PLATFORM_APPLE 0x00040000 -//#define GLM_PLATFORM_IOS 0x00080000 -#define GLM_PLATFORM_ANDROID 0x00100000 -#define GLM_PLATFORM_CHROME_NACL 0x00200000 -#define GLM_PLATFORM_UNIX 0x00400000 -#define GLM_PLATFORM_QNXNTO 0x00800000 -#define GLM_PLATFORM_WINCE 0x01000000 -#define GLM_PLATFORM_CYGWIN 0x02000000 - -#ifdef GLM_FORCE_PLATFORM_UNKNOWN -# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN -#elif defined(__CYGWIN__) -# define GLM_PLATFORM GLM_PLATFORM_CYGWIN -#elif defined(__QNXNTO__) -# define GLM_PLATFORM GLM_PLATFORM_QNXNTO -#elif defined(__APPLE__) -# define GLM_PLATFORM GLM_PLATFORM_APPLE -#elif defined(WINCE) -# define GLM_PLATFORM GLM_PLATFORM_WINCE -#elif defined(_WIN32) -# define GLM_PLATFORM GLM_PLATFORM_WINDOWS -#elif defined(__native_client__) -# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL -#elif defined(__ANDROID__) -# define GLM_PLATFORM GLM_PLATFORM_ANDROID -#elif defined(__linux) -# define GLM_PLATFORM GLM_PLATFORM_LINUX -#elif defined(__unix) -# define GLM_PLATFORM GLM_PLATFORM_UNIX -#else -# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN -#endif// - -/////////////////////////////////////////////////////////////////////////////////// -// Compiler - -#define GLM_COMPILER_UNKNOWN 0x00000000 - -// Intel -#define GLM_COMPILER_INTEL 0x00100000 -#define GLM_COMPILER_INTEL14 0x00100040 -#define GLM_COMPILER_INTEL15 0x00100050 -#define GLM_COMPILER_INTEL16 0x00100060 -#define GLM_COMPILER_INTEL17 0x00100070 - -// Visual C++ defines -#define GLM_COMPILER_VC 0x01000000 -#define GLM_COMPILER_VC12 0x01000001 -#define GLM_COMPILER_VC14 0x01000002 -#define GLM_COMPILER_VC15 0x01000003 -#define GLM_COMPILER_VC15_3 0x01000004 -#define GLM_COMPILER_VC15_5 0x01000005 -#define GLM_COMPILER_VC15_6 0x01000006 -#define GLM_COMPILER_VC15_7 0x01000007 -#define GLM_COMPILER_VC15_8 0x01000008 -#define GLM_COMPILER_VC15_9 0x01000009 -#define GLM_COMPILER_VC16 0x0100000A - -// GCC defines -#define GLM_COMPILER_GCC 0x02000000 -#define GLM_COMPILER_GCC46 0x020000D0 -#define GLM_COMPILER_GCC47 0x020000E0 -#define GLM_COMPILER_GCC48 0x020000F0 -#define GLM_COMPILER_GCC49 0x02000100 -#define GLM_COMPILER_GCC5 0x02000200 -#define GLM_COMPILER_GCC6 0x02000300 -#define GLM_COMPILER_GCC7 0x02000400 -#define GLM_COMPILER_GCC8 0x02000500 - -// CUDA -#define GLM_COMPILER_CUDA 0x10000000 -#define GLM_COMPILER_CUDA75 0x10000001 -#define GLM_COMPILER_CUDA80 0x10000002 -#define GLM_COMPILER_CUDA90 0x10000004 - -// SYCL -#define GLM_COMPILER_SYCL 0x00300000 - -// Clang -#define GLM_COMPILER_CLANG 0x20000000 -#define GLM_COMPILER_CLANG34 0x20000050 -#define GLM_COMPILER_CLANG35 0x20000060 -#define GLM_COMPILER_CLANG36 0x20000070 -#define GLM_COMPILER_CLANG37 0x20000080 -#define GLM_COMPILER_CLANG38 0x20000090 -#define GLM_COMPILER_CLANG39 0x200000A0 -#define GLM_COMPILER_CLANG40 0x200000B0 -#define GLM_COMPILER_CLANG41 0x200000C0 -#define GLM_COMPILER_CLANG42 0x200000D0 - -// Build model -#define GLM_MODEL_32 0x00000010 -#define GLM_MODEL_64 0x00000020 - -// Force generic C++ compiler -#ifdef GLM_FORCE_COMPILER_UNKNOWN -# define GLM_COMPILER GLM_COMPILER_UNKNOWN - -#elif defined(__INTEL_COMPILER) -# if __INTEL_COMPILER >= 1700 -# define GLM_COMPILER GLM_COMPILER_INTEL17 -# elif __INTEL_COMPILER >= 1600 -# define GLM_COMPILER GLM_COMPILER_INTEL16 -# elif __INTEL_COMPILER >= 1500 -# define 
GLM_COMPILER GLM_COMPILER_INTEL15 -# elif __INTEL_COMPILER >= 1400 -# define GLM_COMPILER GLM_COMPILER_INTEL14 -# elif __INTEL_COMPILER < 1400 -# error "GLM requires ICC 2013 SP1 or newer" -# endif - -// CUDA -#elif defined(__CUDACC__) -# if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA) -# include <cuda.h> // make sure version is defined since nvcc does not define it itself! -# endif -# if CUDA_VERSION >= 8000 -# define GLM_COMPILER GLM_COMPILER_CUDA80 -# elif CUDA_VERSION >= 7500 -# define GLM_COMPILER GLM_COMPILER_CUDA75 -# elif CUDA_VERSION >= 7000 -# define GLM_COMPILER GLM_COMPILER_CUDA70 -# elif CUDA_VERSION < 7000 -# error "GLM requires CUDA 7.0 or higher" -# endif - -// SYCL -#elif defined(__SYCL_DEVICE_ONLY__) -# define GLM_COMPILER GLM_COMPILER_SYCL - -// Clang -#elif defined(__clang__) -# if defined(__apple_build_version__) -# if (__clang_major__ < 6) -# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher" -# elif __clang_major__ == 6 && __clang_minor__ == 0 -# define GLM_COMPILER GLM_COMPILER_CLANG35 -# elif __clang_major__ == 6 && __clang_minor__ >= 1 -# define GLM_COMPILER GLM_COMPILER_CLANG36 -# elif __clang_major__ >= 7 -# define GLM_COMPILER GLM_COMPILER_CLANG37 -# endif -# else -# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3) -# error "GLM requires Clang 3.4 or higher" -# elif __clang_major__ == 3 && __clang_minor__ == 4 -# define GLM_COMPILER GLM_COMPILER_CLANG34 -# elif __clang_major__ == 3 && __clang_minor__ == 5 -# define GLM_COMPILER GLM_COMPILER_CLANG35 -# elif __clang_major__ == 3 && __clang_minor__ == 6 -# define GLM_COMPILER GLM_COMPILER_CLANG36 -# elif __clang_major__ == 3 && __clang_minor__ == 7 -# define GLM_COMPILER GLM_COMPILER_CLANG37 -# elif __clang_major__ == 3 && __clang_minor__ == 8 -# define GLM_COMPILER GLM_COMPILER_CLANG38 -# elif __clang_major__ == 3 && __clang_minor__ >= 9 -# define GLM_COMPILER GLM_COMPILER_CLANG39 -# elif __clang_major__ == 4 && __clang_minor__ == 0 -# define GLM_COMPILER GLM_COMPILER_CLANG40 -# elif __clang_major__ == 4 && __clang_minor__ == 1 -# define GLM_COMPILER GLM_COMPILER_CLANG41 -# elif __clang_major__ == 4 && __clang_minor__ >= 2 -# define GLM_COMPILER GLM_COMPILER_CLANG42 -# elif __clang_major__ >= 4 -# define GLM_COMPILER GLM_COMPILER_CLANG42 -# endif -# endif - -// Visual C++ -#elif defined(_MSC_VER) -# if _MSC_VER >= 1920 -# define GLM_COMPILER GLM_COMPILER_VC16 -# elif _MSC_VER >= 1916 -# define GLM_COMPILER GLM_COMPILER_VC15_9 -# elif _MSC_VER >= 1915 -# define GLM_COMPILER GLM_COMPILER_VC15_8 -# elif _MSC_VER >= 1914 -# define GLM_COMPILER GLM_COMPILER_VC15_7 -# elif _MSC_VER >= 1913 -# define GLM_COMPILER GLM_COMPILER_VC15_6 -# elif _MSC_VER >= 1912 -# define GLM_COMPILER GLM_COMPILER_VC15_5 -# elif _MSC_VER >= 1911 -# define GLM_COMPILER GLM_COMPILER_VC15_3 -# elif _MSC_VER >= 1910 -# define GLM_COMPILER GLM_COMPILER_VC15 -# elif _MSC_VER >= 1900 -# define GLM_COMPILER GLM_COMPILER_VC14 -# elif _MSC_VER >= 1800 -# define GLM_COMPILER GLM_COMPILER_VC12 -# elif _MSC_VER < 1800 -# error "GLM requires Visual C++ 12 - 2013 or higher" -# endif//_MSC_VER - -// G++ -#elif defined(__GNUC__) || defined(__MINGW32__) -# if __GNUC__ >= 8 -# define GLM_COMPILER GLM_COMPILER_GCC8 -# elif __GNUC__ >= 7 -# define GLM_COMPILER GLM_COMPILER_GCC7 -# elif __GNUC__ >= 6 -# define GLM_COMPILER GLM_COMPILER_GCC6 -# elif __GNUC__ >= 5 -# define GLM_COMPILER GLM_COMPILER_GCC5 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 -# define GLM_COMPILER GLM_COMPILER_GCC49 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >=
8 -# define GLM_COMPILER GLM_COMPILER_GCC48 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7 -# define GLM_COMPILER GLM_COMPILER_GCC47 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6 -# define GLM_COMPILER GLM_COMPILER_GCC46 -# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4) -# error "GLM requires GCC 4.6 or higher" -# endif - -#else -# define GLM_COMPILER GLM_COMPILER_UNKNOWN -#endif - -#ifndef GLM_COMPILER -# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message." -#endif//GLM_COMPILER - -/////////////////////////////////////////////////////////////////////////////////// -// Instruction sets - -// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_AVX2 - -#define GLM_ARCH_MIPS_BIT (0x10000000) -#define GLM_ARCH_PPC_BIT (0x20000000) -#define GLM_ARCH_ARM_BIT (0x40000000) -#define GLM_ARCH_ARMV8_BIT (0x01000000) -#define GLM_ARCH_X86_BIT (0x80000000) - -#define GLM_ARCH_SIMD_BIT (0x00001000) - -#define GLM_ARCH_NEON_BIT (0x00000001) -#define GLM_ARCH_SSE_BIT (0x00000002) -#define GLM_ARCH_SSE2_BIT (0x00000004) -#define GLM_ARCH_SSE3_BIT (0x00000008) -#define GLM_ARCH_SSSE3_BIT (0x00000010) -#define GLM_ARCH_SSE41_BIT (0x00000020) -#define GLM_ARCH_SSE42_BIT (0x00000040) -#define GLM_ARCH_AVX_BIT (0x00000080) -#define GLM_ARCH_AVX2_BIT (0x00000100) - -#define GLM_ARCH_UNKNOWN (0) -#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT) -#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86) -#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE) -#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2) -#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3) -#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3) -#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41) -#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42) -#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX) -#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT) -#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT) -#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM) -#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT) -#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT) - -#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE) -# define GLM_ARCH GLM_ARCH_UNKNOWN -#elif defined(GLM_FORCE_NEON) -# if __ARM_ARCH >= 8 -# define GLM_ARCH (GLM_ARCH_ARMV8) -# else -# define GLM_ARCH (GLM_ARCH_NEON) -# endif -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_AVX2) -# define GLM_ARCH (GLM_ARCH_AVX2) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_AVX) -# define GLM_ARCH (GLM_ARCH_AVX) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE42) -# define GLM_ARCH (GLM_ARCH_SSE42) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE41) -# define GLM_ARCH (GLM_ARCH_SSE41) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSSE3) -# define GLM_ARCH (GLM_ARCH_SSSE3) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE3) -# define GLM_ARCH (GLM_ARCH_SSE3) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE2) -# define GLM_ARCH (GLM_ARCH_SSE2) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE) -# define GLM_ARCH (GLM_ARCH_SSE) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY) -# if defined(__AVX2__) -# define GLM_ARCH (GLM_ARCH_AVX2) -# elif defined(__AVX__) -# define GLM_ARCH (GLM_ARCH_AVX) -# elif defined(__SSE4_2__) 
-# define GLM_ARCH (GLM_ARCH_SSE42) -# elif defined(__SSE4_1__) -# define GLM_ARCH (GLM_ARCH_SSE41) -# elif defined(__SSSE3__) -# define GLM_ARCH (GLM_ARCH_SSSE3) -# elif defined(__SSE3__) -# define GLM_ARCH (GLM_ARCH_SSE3) -# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86_FP) -# define GLM_ARCH (GLM_ARCH_SSE2) -# elif defined(__i386__) -# define GLM_ARCH (GLM_ARCH_X86) -# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) -# define GLM_ARCH (GLM_ARCH_ARMV8) -# elif defined(__ARM_NEON) -# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON) -# elif defined(__arm__ ) || defined(_M_ARM) -# define GLM_ARCH (GLM_ARCH_ARM) -# elif defined(__mips__ ) -# define GLM_ARCH (GLM_ARCH_MIPS) -# elif defined(__powerpc__ ) || defined(_M_PPC) -# define GLM_ARCH (GLM_ARCH_PPC) -# else -# define GLM_ARCH (GLM_ARCH_UNKNOWN) -# endif -#else -# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__) -# define GLM_ARCH (GLM_ARCH_X86) -# elif defined(__arm__) || defined(_M_ARM) -# define GLM_ARCH (GLM_ARCH_ARM) -# elif defined(__powerpc__) || defined(_M_PPC) -# define GLM_ARCH (GLM_ARCH_PPC) -# elif defined(__mips__) -# define GLM_ARCH (GLM_ARCH_MIPS) -# else -# define GLM_ARCH (GLM_ARCH_UNKNOWN) -# endif -#endif - -#if GLM_ARCH & GLM_ARCH_AVX2_BIT -# include <immintrin.h> -#elif GLM_ARCH & GLM_ARCH_AVX_BIT -# include <immintrin.h> -#elif GLM_ARCH & GLM_ARCH_SSE42_BIT -# if GLM_COMPILER & GLM_COMPILER_CLANG -# include <popcntintrin.h> -# endif -# include <nmmintrin.h> -#elif GLM_ARCH & GLM_ARCH_SSE41_BIT -# include <smmintrin.h> -#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT -# include <tmmintrin.h> -#elif GLM_ARCH & GLM_ARCH_SSE3_BIT -# include <pmmintrin.h> -#elif GLM_ARCH & GLM_ARCH_SSE2_BIT -# include <emmintrin.h> -#elif GLM_ARCH & GLM_ARCH_NEON_BIT -# include "neon.h" -#endif//GLM_ARCH - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - typedef __m128 glm_f32vec4; - typedef __m128i glm_i32vec4; - typedef __m128i glm_u32vec4; - typedef __m128d glm_f64vec2; - typedef __m128i glm_i64vec2; - typedef __m128i glm_u64vec2; - - typedef glm_f32vec4 glm_vec4; - typedef glm_i32vec4 glm_ivec4; - typedef glm_u32vec4 glm_uvec4; - typedef glm_f64vec2 glm_dvec2; -#endif - -#if GLM_ARCH & GLM_ARCH_AVX_BIT - typedef __m256d glm_f64vec4; - typedef glm_f64vec4 glm_dvec4; -#endif - -#if GLM_ARCH & GLM_ARCH_AVX2_BIT - typedef __m256i glm_i64vec4; - typedef __m256i glm_u64vec4; -#endif - -#if GLM_ARCH & GLM_ARCH_NEON_BIT - typedef float32x4_t glm_f32vec4; - typedef int32x4_t glm_i32vec4; - typedef uint32x4_t glm_u32vec4; -#endif diff --git a/third_party/glm/simd/trigonometric.h b/third_party/glm/simd/trigonometric.h deleted file mode 100755 index 739b796..0000000 --- a/third_party/glm/simd/trigonometric.h +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref simd -/// @file glm/simd/trigonometric.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - diff --git a/third_party/glm/simd/vector_relational.h b/third_party/glm/simd/vector_relational.h deleted file mode 100755 index f7385e9..0000000 --- a/third_party/glm/simd/vector_relational.h +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref simd -/// @file glm/simd/vector_relational.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/trigonometric.hpp b/third_party/glm/trigonometric.hpp deleted file mode 100755 index fcf07f8..0000000 --- a/third_party/glm/trigonometric.hpp +++ /dev/null @@ -1,210 +0,0 @@ -/// @ref core -/// @file glm/trigonometric.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions -/// -/// @defgroup core_func_trigonometric
Angle and Trigonometry Functions -/// @ingroup core -/// -/// Function parameters specified as angle are assumed to be in units of radians. -/// In no case will any of these functions result in a divide by zero error. If -/// the divisor of a ratio is 0, then results will be undefined. -/// -/// These all operate component-wise. The description is per component. -/// -/// Include <glm/trigonometric.hpp> to use these core features. -/// -/// @see ext_vector_trigonometric - -#pragma once - -#include "detail/setup.hpp" -#include "detail/qualifier.hpp" - -namespace glm -{ - /// @addtogroup core_func_trigonometric - /// @{ - - /// Converts degrees to radians and returns the result. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL radians man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> radians(vec<L, T, Q> const& degrees); - - /// Converts radians to degrees and returns the result. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL degrees man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> degrees(vec<L, T, Q> const& radians); - - /// The standard trigonometric sine function. - /// The values returned by this function will range from [-1, 1]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sin man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> sin(vec<L, T, Q> const& angle); - - /// The standard trigonometric cosine function. - /// The values returned by this function will range from [-1, 1]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL cos man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> cos(vec<L, T, Q> const& angle); - - /// The standard trigonometric tangent function. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL tan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> tan(vec<L, T, Q> const& angle); - - /// Arc sine. Returns an angle whose sine is x. - /// The range of values returned by this function is [-PI/2, PI/2]. - /// Results are undefined if |x| > 1. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL asin man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> asin(vec<L, T, Q> const& x); - - /// Arc cosine. Returns an angle whose cosine is x. - /// The range of values returned by this function is [0, PI]. - /// Results are undefined if |x| > 1.
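// Editor's example (not part of the original source): typical component-wise use of the declarations in this header, assuming <glm/glm.hpp> is included:
//   glm::vec3 deg(0.0f, 90.0f, 180.0f);
//   glm::vec3 rad = glm::radians(deg);   // { 0, pi/2, pi }
//   glm::vec3 s = glm::sin(rad);         // { 0, 1, ~0 up to rounding }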
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL acos man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> acos(vec<L, T, Q> const& x); - - /// Arc tangent. Returns an angle whose tangent is y/x. - /// The signs of x and y are used to determine what - /// quadrant the angle is in. The range of values returned - /// by this function is [-PI, PI]. Results are undefined - /// if x and y are both 0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y, vec<L, T, Q> const& x); - - /// Arc tangent. Returns an angle whose tangent is y_over_x. - /// The range of values returned by this function is [-PI/2, PI/2]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> atan(vec<L, T, Q> const& y_over_x); - - /// Returns the hyperbolic sine function, (exp(x) - exp(-x)) / 2 - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sinh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> sinh(vec<L, T, Q> const& angle); - - /// Returns the hyperbolic cosine function, (exp(x) + exp(-x)) / 2 - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL cosh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> cosh(vec<L, T, Q> const& angle); - - /// Returns the hyperbolic tangent function, sinh(angle) / cosh(angle) - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL tanh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> tanh(vec<L, T, Q> const& angle); - - /// Arc hyperbolic sine; returns the inverse of sinh. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL asinh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> asinh(vec<L, T, Q> const& x); - - /// Arc hyperbolic cosine; returns the non-negative inverse - /// of cosh. Results are undefined if x < 1.
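// Editor's example (not part of the original source): the two-argument atan above recovers the quadrant from the signs of both inputs, which the one-argument overload cannot, assuming <glm/glm.hpp> is included:
//   glm::vec2 y(1.0f, -1.0f), x(-1.0f, -1.0f);
//   glm::vec2 a = glm::atan(y, x);   // ~{ 3*pi/4, -3*pi/4 }
//   glm::vec2 b = glm::atan(y / x);  // ~{ -pi/4, pi/4 }, quadrant information lost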
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL acosh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> acosh(vec<L, T, Q> const& x); - - /// Arc hyperbolic tangent; returns the inverse of tanh. - /// Results are undefined if abs(x) >= 1. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atanh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template<length_t L, typename T, qualifier Q> - GLM_FUNC_DECL vec<L, T, Q> atanh(vec<L, T, Q> const& x); - - /// @} -}//namespace glm - -#include "detail/func_trigonometric.inl" diff --git a/third_party/glm/vec2.hpp b/third_party/glm/vec2.hpp deleted file mode 100755 index be768bf..0000000 --- a/third_party/glm/vec2.hpp +++ /dev/null @@ -1,14 +0,0 @@ -/// @ref core -/// @file glm/vec2.hpp - -#pragma once -#include "./ext/vector_bool2.hpp" -#include "./ext/vector_bool2_precision.hpp" -#include "./ext/vector_float2.hpp" -#include "./ext/vector_float2_precision.hpp" -#include "./ext/vector_double2.hpp" -#include "./ext/vector_double2_precision.hpp" -#include "./ext/vector_int2.hpp" -#include "./ext/vector_int2_precision.hpp" -#include "./ext/vector_uint2.hpp" -#include "./ext/vector_uint2_precision.hpp" diff --git a/third_party/glm/vec3.hpp b/third_party/glm/vec3.hpp deleted file mode 100755 index f570722..0000000 --- a/third_party/glm/vec3.hpp +++ /dev/null @@ -1,14 +0,0 @@ -/// @ref core -/// @file glm/vec3.hpp - -#pragma once -#include "./ext/vector_bool3.hpp" -#include "./ext/vector_bool3_precision.hpp" -#include "./ext/vector_float3.hpp" -#include "./ext/vector_float3_precision.hpp" -#include "./ext/vector_double3.hpp" -#include "./ext/vector_double3_precision.hpp" -#include "./ext/vector_int3.hpp" -#include "./ext/vector_int3_precision.hpp" -#include "./ext/vector_uint3.hpp" -#include "./ext/vector_uint3_precision.hpp" diff --git a/third_party/glm/vec4.hpp b/third_party/glm/vec4.hpp deleted file mode 100755 index 9117020..0000000 --- a/third_party/glm/vec4.hpp +++ /dev/null @@ -1,15 +0,0 @@ -/// @ref core -/// @file glm/vec4.hpp - -#pragma once -#include "./ext/vector_bool4.hpp" -#include "./ext/vector_bool4_precision.hpp" -#include "./ext/vector_float4.hpp" -#include "./ext/vector_float4_precision.hpp" -#include "./ext/vector_double4.hpp" -#include "./ext/vector_double4_precision.hpp" -#include "./ext/vector_int4.hpp" -#include "./ext/vector_int4_precision.hpp" -#include "./ext/vector_uint4.hpp" -#include "./ext/vector_uint4_precision.hpp" - diff --git a/third_party/glm/vector_relational.hpp b/third_party/glm/vector_relational.hpp deleted file mode 100755 index a0fe17e..0000000 --- a/third_party/glm/vector_relational.hpp +++ /dev/null @@ -1,121 +0,0 @@ -/// @ref core -/// @file glm/vector_relational.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions -/// -/// @defgroup core_func_vector_relational Vector Relational Functions -/// @ingroup core -/// -/// Relational and equality operators (<, <=, >, >=, ==, !=) are defined to -/// operate on scalars and produce scalar Boolean results. For vector results, -/// use the following built-in functions. -/// -/// In all cases, the sizes of all the input and return vectors for any particular -/// call must match.
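// Editor's example (not part of the original source): the comparisons declared below return a bool vector of the same size, which any()/all() then reduce, assuming <glm/glm.hpp> is included:
//   glm::vec3 a(1.0f, 2.0f, 3.0f), b(2.0f, 2.0f, 2.0f);
//   glm::bvec3 lt = glm::lessThan(a, b);  // { true, false, false }
//   bool anyLess = glm::any(lt);          // true
//   bool allLess = glm::all(lt);          // false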
-/// -/// Include to use these core features. -/// -/// @see ext_vector_relational - -#pragma once - -#include "detail/qualifier.hpp" -#include "detail/setup.hpp" - -namespace glm -{ - /// @addtogroup core_func_vector_relational - /// @{ - - /// Returns the component-wise comparison result of x < y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL lessThan man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x <= y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL lessThanEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x > y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL greaterThan man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x >= y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL greaterThanEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x == y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point, integer or bool scalar type. - /// - /// @see GLSL equal man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x != y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point, integer or bool scalar type. - /// - /// @see GLSL notEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y); - - /// Returns true if any component of x is true. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL any man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR bool any(vec const& v); - - /// Returns true if all components of x are true. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL all man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR bool all(vec const& v); - - /// Returns the component-wise logical complement of x. 
- /// /!\ Because of language incompatibilities between C++ and GLSL, GLM defines the function not but not_ instead. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL not man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec not_(vec const& v); - - /// @} -}//namespace glm - -#include "detail/func_vector_relational.inl" diff --git a/third_party/kvf.h b/third_party/kvf.h new file mode 100755 index 0000000..a8b6008 --- /dev/null +++ b/third_party/kvf.h @@ -0,0 +1,2334 @@ +/*** + * MIT License + * + * Copyright (c) 2023-2024 kbz_8 + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * + * Do this: + * #define KVF_IMPLEMENTATION + * before you include this file in *one* C or C++ file to create the implementation. + * + * // i.e. it should look like this: + * #include ... + * #include ... + * #include ... + * #define KVF_IMPLEMENTATION + * #include "kvf.h" + * + * You can #define KVF_ASSERT(x) before the #include to avoid using assert.h. + * And #define KVF_MALLOC, KVF_REALLOC, and KVF_FREE to avoid using malloc, realloc, free. + * + * By default KVF exits the program if a call to the Vulkan API fails. You can avoid that + * by using #define KVF_NO_EXIT_ON_FAILURE + * + * If you are using Volk or any other meta loader you must define KVF_IMPL_VK_NO_PROTOTYPES + * or VK_NO_PROTOTYPES before including this file to avoid conflicts with Vulkan prototypes. + * + * You can also #define KVF_ENABLE_VALIDATION_LAYERS to enable validation layers. 
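+ *
+ * A minimal usage sketch (illustrative only: it assumes `extensions`,
+ * `extensions_count`, `surface` and `extent` already come from your windowing
+ * layer, e.g. SDL2, and leaves error handling to KVF's defaults):
+ *
+ *     #define KVF_IMPLEMENTATION
+ *     #include "kvf.h"
+ *
+ *     VkInstance instance = kvfCreateInstance(extensions, extensions_count);
+ *     VkPhysicalDevice physical = kvfPickGoodDefaultPhysicalDevice(instance, surface);
+ *     VkDevice device = kvfCreateDefaultDevice(physical);
+ *     VkSwapchainKHR swapchain = kvfCreateSwapchainKHR(device, physical, surface, extent, true);
+ *     // ... record and submit command buffers, present with kvfQueuePresentKHR ...
+ *     kvfDestroySwapchainKHR(device, swapchain);
+ *     kvfDestroyDevice(device);
+ *     kvfDestroyInstance(instance);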
+ */
+
+#ifndef KBZ_8_VULKAN_FRAMEWORK_H
+#define KBZ_8_VULKAN_FRAMEWORK_H
+
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	#define VK_NO_PROTOTYPES
+#endif
+
+#include <vulkan/vulkan.h>
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* ============================================= Prototypes ============================================= */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum
+{
+	KVF_GRAPHICS_QUEUE = 0,
+	KVF_PRESENT_QUEUE = 1,
+	KVF_COMPUTE_QUEUE = 2
+} KvfQueueType;
+
+typedef enum
+{
+	KVF_IMAGE_COLOR = 0,
+	KVF_IMAGE_DEPTH = 1,
+	KVF_IMAGE_DEPTH_ARRAY = 2,
+	KVF_IMAGE_CUBE = 3,
+	KVF_IMAGE_OTHER = 4,
+} KvfImageType;
+
+typedef void (*KvfErrorCallback)(const char* message);
+
+typedef struct KvfGraphicsPipelineBuilder KvfGraphicsPipelineBuilder;
+
+void kvfSetErrorCallback(KvfErrorCallback callback);
+void kvfSetValidationErrorCallback(KvfErrorCallback callback);
+void kvfSetValidationWarningCallback(KvfErrorCallback callback);
+
+void kvfAddLayer(const char* layer);
+
+VkInstance kvfCreateInstance(const char** extensionsEnabled, uint32_t extensionsCount);
+void kvfDestroyInstance(VkInstance instance);
+
+VkPhysicalDevice kvfPickFirstPhysicalDevice(VkInstance instance);
+VkPhysicalDevice kvfPickGoodDefaultPhysicalDevice(VkInstance instance, VkSurfaceKHR surface);
+VkPhysicalDevice kvfPickGoodPhysicalDevice(VkInstance instance, VkSurfaceKHR surface, const char** deviceExtensions, uint32_t deviceExtensionsCount);
+
+VkQueue kvfGetDeviceQueue(VkDevice device, KvfQueueType queue);
+uint32_t kvfGetDeviceQueueFamily(VkDevice device, KvfQueueType queue);
+bool kvfQueuePresentKHR(VkDevice device, VkSemaphore wait, VkSwapchainKHR swapchain, uint32_t image_index); // return false when the swapchain must be recreated
+
+VkDevice kvfCreateDefaultDevice(VkPhysicalDevice physical);
+VkDevice kvfCreateDevice(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count);
+void kvfDestroyDevice(VkDevice device);
+
+VkFence kvfCreateFence(VkDevice device);
+void kvfWaitForFence(VkDevice device, VkFence fence);
+void kvfDestroyFence(VkDevice device, VkFence fence);
+
+VkSemaphore kvfCreateSemaphore(VkDevice device);
+void kvfDestroySemaphore(VkDevice device, VkSemaphore semaphore);
+
+VkSwapchainKHR kvfCreateSwapchainKHR(VkDevice device, VkPhysicalDevice physical, VkSurfaceKHR surface, VkExtent2D extent, bool tryVsync);
+VkFormat kvfGetSwapchainImagesFormat(VkSwapchainKHR swapchain);
+uint32_t kvfGetSwapchainImagesCount(VkSwapchainKHR swapchain);
+uint32_t kvfGetSwapchainMinImagesCount(VkSwapchainKHR swapchain);
+VkExtent2D kvfGetSwapchainImagesSize(VkSwapchainKHR swapchain);
+void kvfDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain);
+
+VkImage kvfCreateImage(VkDevice device, uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage);
+void kvfImageBufferToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkImage src, size_t size);
+void kvfDestroyImage(VkDevice device, VkImage image);
+VkImageView kvfCreateImageView(VkDevice device, VkImage image, VkFormat format, VkImageViewType type, VkImageAspectFlags aspect);
+void kvfDestroyImageView(VkDevice device, VkImageView image_view);
+void kvfTransitionImageLayout(VkDevice device, VkImage image, VkCommandBuffer cmd, VkFormat format, VkImageLayout old_layout, VkImageLayout new_layout, bool is_single_time_cmd_buffer);
+VkSampler kvfCreateSampler(VkDevice device, VkFilter filters, VkSamplerAddressMode address_modes, VkSamplerMipmapMode mipmap_mode);
+void kvfDestroySampler(VkDevice device, VkSampler sampler);
+
+VkBuffer kvfCreateBuffer(VkDevice device, VkBufferUsageFlags usage, VkDeviceSize size);
+void kvfCopyBufferToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkBuffer src, size_t size);
+void kvfCopyBufferToImage(VkCommandBuffer cmd, VkImage dst, VkBuffer src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent);
+void kvfDestroyBuffer(VkDevice device, VkBuffer buffer);
+
+VkFramebuffer kvfCreateFramebuffer(VkDevice device, VkRenderPass renderpass, VkImageView* image_views, size_t image_views_count, VkExtent2D extent);
+VkExtent2D kvfGetFramebufferSize(VkFramebuffer buffer);
+void kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer);
+
+VkCommandBuffer kvfCreateCommandBuffer(VkDevice device);
+VkCommandBuffer kvfCreateCommandBufferLeveled(VkDevice device, VkCommandBufferLevel level);
+void kvfBeginCommandBuffer(VkCommandBuffer buffer, VkCommandBufferUsageFlags flags);
+void kvfEndCommandBuffer(VkCommandBuffer buffer);
+void kvfSubmitCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkSemaphore signal, VkSemaphore wait, VkFence fence, VkPipelineStageFlags* stages);
+void kvfSubmitSingleTimeCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkFence fence);
+
+VkAttachmentDescription kvfBuildAttachmentDescription(KvfImageType type, VkFormat format, VkImageLayout initial, VkImageLayout final, bool clear);
+VkAttachmentDescription kvfBuildSwapchainAttachmentDescription(VkSwapchainKHR swapchain, bool clear);
+
+VkRenderPass kvfCreateRenderPass(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, VkPipelineBindPoint bind_point);
+void kvfDestroyRenderPass(VkDevice device, VkRenderPass renderpass);
+void kvfBeginRenderPass(VkRenderPass pass, VkCommandBuffer cmd, VkFramebuffer framebuffer, VkExtent2D framebuffer_extent, VkClearValue* clears, size_t clears_count);
+
+VkShaderModule kvfCreateShaderModule(VkDevice device, uint32_t* code, size_t size);
+void kvfDestroyShaderModule(VkDevice device, VkShaderModule shader);
+
+const char* kvfVerbaliseVkResult(VkResult result);
+
+bool kvfIsStencilFormat(VkFormat format);
+bool kvfIsDepthFormat(VkFormat format);
+uint32_t kvfFormatSize(VkFormat format);
+VkPipelineStageFlags kvfLayoutToAccessMask(VkImageLayout layout, bool is_destination);
+VkPipelineStageFlags kvfAccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags);
+VkFormat kvfFindSupportFormatInCandidates(VkDevice device, VkFormat* candidates, size_t candidates_count, VkImageTiling tiling, VkFormatFeatureFlags flags);
+
+VkDescriptorSetLayout kvfCreateDescriptorSetLayout(VkDevice device, VkDescriptorSetLayoutBinding* bindings, size_t bindings_count);
+void kvfDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout layout);
+
+VkDescriptorSet kvfAllocateDescriptorSet(VkDevice device, VkDescriptorSetLayout layout);
+void kvfUpdateStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding);
+void kvfUpdateUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding);
+void kvfUpdateImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding);
+VkWriteDescriptorSet kvfWriteStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding);
+VkWriteDescriptorSet kvfWriteUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding);
+VkWriteDescriptorSet kvfWriteImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding);
+
+void kvfResetDeviceDescriptorPools(VkDevice device);
+
+VkPipelineLayout kvfCreatePipelineLayout(VkDevice device, VkDescriptorSetLayout* set_layouts, size_t set_layouts_count, VkPushConstantRange* pc, size_t pc_count);
+void kvfDestroyPipelineLayout(VkDevice device, VkPipelineLayout layout);
+
+KvfGraphicsPipelineBuilder* kvfCreateGPipelineBuilder();
+void kvfDestroyGPipelineBuilder(KvfGraphicsPipelineBuilder* builder);
+
+void kvfGPipelineBuilderReset(KvfGraphicsPipelineBuilder* builder);
+void kvfGPipelineBuilderSetInputTopology(KvfGraphicsPipelineBuilder* builder, VkPrimitiveTopology topology);
+void kvfGPipelineBuilderSetPolygonMode(KvfGraphicsPipelineBuilder* builder, VkPolygonMode polygon, float line_width);
+void kvfGPipelineBuilderSetCullMode(KvfGraphicsPipelineBuilder* builder, VkCullModeFlags cull, VkFrontFace face);
+void kvfGPipelineBuilderDisableBlending(KvfGraphicsPipelineBuilder* builder);
+void kvfGPipelineBuilderEnableAdditiveBlending(KvfGraphicsPipelineBuilder* builder);
+void kvfGPipelineBuilderEnableAlphaBlending(KvfGraphicsPipelineBuilder* builder);
+void kvfGPipelineBuilderEnableDepthTest(KvfGraphicsPipelineBuilder* builder, VkCompareOp op, bool write_enabled);
+void kvfGPipelineBuilderDisableDepthTest(KvfGraphicsPipelineBuilder* builder);
+void kvfGPipelineBuilderSetVertexInputs(KvfGraphicsPipelineBuilder* builder, VkVertexInputBindingDescription binds, VkVertexInputAttributeDescription* attributes, size_t attributes_count);
+void kvfGPipelineBuilderAddShaderStage(KvfGraphicsPipelineBuilder* builder, VkShaderStageFlagBits stage, VkShaderModule module, const char* entry);
+void kvfGPipelineBuilderResetShaderStages(KvfGraphicsPipelineBuilder* builder);
+
+VkPipeline kvfCreateGraphicsPipeline(VkDevice device, VkPipelineLayout layout, KvfGraphicsPipelineBuilder* builder, VkRenderPass pass);
+void kvfDestroyPipeline(VkDevice device, VkPipeline pipeline);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // KBZ_8_VULKAN_FRAMEWORK_H
+
+/* ========================================== Implementation =========================================== */
+
+#ifdef KVF_IMPLEMENTATION
+
+#ifndef KVF_MALLOC
+	#define KVF_MALLOC(x) malloc(x)
+#endif
+#ifndef KVF_REALLOC
+	#define KVF_REALLOC(x, s) realloc(x, s)
+#endif
+#ifndef KVF_FREE
+	#define KVF_FREE(x) free(x)
+#endif
+#ifndef KVF_ASSERT
+	#include <assert.h>
+	#define KVF_ASSERT(x) assert(x)
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef KVF_DESCRIPTOR_POOL_CAPACITY
+	#undef KVF_DESCRIPTOR_POOL_CAPACITY
+#endif
+#define KVF_DESCRIPTOR_POOL_CAPACITY 512
+
+typedef struct
+{
+	int32_t graphics;
+	int32_t present;
+	int32_t compute;
+} __KvfQueueFamilies;
+
+typedef struct
+{
+	VkDescriptorPool pool;
+	size_t capacity;
+	size_t size;
+} __KvfDescriptorPool;
+
+typedef struct
+{
+	VkDevice device;
+	VkPhysicalDevice physical;
+	VkCommandPool cmd_pool;
+	__KvfQueueFamilies queues;
+	__KvfDescriptorPool* sets_pools;
+	size_t sets_pools_size;
+} __KvfDevice;
+
+typedef struct
+{
+	VkSurfaceCapabilitiesKHR capabilities;
+	VkSurfaceFormatKHR* formats;
+	VkPresentModeKHR* presentModes;
+	uint32_t formatsCount;
+	uint32_t presentModesCount;
+} __KvfSwapchainSupportInternal;
+
+typedef struct
+{
+	__KvfSwapchainSupportInternal support;
+	VkSwapchainKHR swapchain;
+	VkExtent2D images_extent;
+	VkFormat images_format;
+	uint32_t images_count;
+} __KvfSwapchain;
+
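+// Internal bookkeeping: KVF shadows each Vulkan handle it creates with one of
+// these small structs, caching creation-time metadata (queue family indices,
+// command pool, descriptor pools, swapchain format/extent, framebuffer extent).
+// Getters such as kvfGetSwapchainImagesFormat() are answered from these caches;
+// lookups are linear scans over the global dynamic arrays declared below.
+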
+typedef struct +{ + VkFramebuffer framebuffer; + VkExtent2D extent; +} __KvfFramebuffer; + +struct KvfGraphicsPipelineBuilder +{ + VkPipelineShaderStageCreateInfo* shader_stages; + VkPipelineVertexInputStateCreateInfo vertex_input_state; + VkPipelineInputAssemblyStateCreateInfo input_assembly_state; + VkPipelineTessellationStateCreateInfo tessellation_state; + VkPipelineRasterizationStateCreateInfo rasterization_state; + VkPipelineDepthStencilStateCreateInfo depth_stencil_state; + VkPipelineColorBlendAttachmentState color_blend_attachment_state; + size_t shader_stages_count; +}; + +// Dynamic arrays +__KvfDevice* __kvf_internal_devices = NULL; +size_t __kvf_internal_devices_size = 0; +size_t __kvf_internal_devices_capacity = 0; + +__KvfSwapchain* __kvf_internal_swapchains = NULL; +size_t __kvf_internal_swapchains_size = 0; +size_t __kvf_internal_swapchains_capacity = 0; + +__KvfFramebuffer* __kvf_internal_framebuffers = NULL; +size_t __kvf_internal_framebuffers_size = 0; +size_t __kvf_internal_framebuffers_capacity = 0; + +#ifdef KVF_ENABLE_VALIDATION_LAYERS + VkDebugUtilsMessengerEXT __kvf_debug_messenger = VK_NULL_HANDLE; + char** __kvf_extra_layers = NULL; + size_t __kvf_extra_layers_count = 0; +#endif + +KvfErrorCallback __kvf_error_callback = NULL; +KvfErrorCallback __kvf_validation_error_callback = NULL; +KvfErrorCallback __kvf_validation_warning_callback = NULL; + +void __kvfCheckVk(VkResult result, const char* function) +{ + if(result != VK_SUCCESS) + { + if(__kvf_error_callback != NULL) + { + char buffer[1024]; + snprintf(buffer, 1024, "KVF Vulkan error in '%s': %s", function, kvfVerbaliseVkResult(result)); + __kvf_error_callback(buffer); + return; + } + fprintf(stderr, "KVF Vulkan error in '%s': %s\n", function, kvfVerbaliseVkResult(result)); + #ifndef KVF_NO_EXIT_ON_FAILURE + exit(EXIT_FAILURE); + #endif + } +} + +#undef __kvfCheckVk +#define __kvfCheckVk(res) __kvfCheckVk(res, __FUNCTION__) + +void __kvfAddDeviceToArray(VkPhysicalDevice device, int32_t graphics_queue, int32_t present_queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + if(__kvf_internal_devices_size == __kvf_internal_devices_capacity) + { + // Resize the dynamic array if necessary + __kvf_internal_devices_capacity += 2; + __kvf_internal_devices = (__KvfDevice*)KVF_REALLOC(__kvf_internal_devices, __kvf_internal_devices_capacity * sizeof(__KvfDevice)); + } + + __kvf_internal_devices[__kvf_internal_devices_size].physical = device; + __kvf_internal_devices[__kvf_internal_devices_size].queues.graphics = graphics_queue; + __kvf_internal_devices[__kvf_internal_devices_size].queues.present = present_queue; + __kvf_internal_devices_size++; +} + +void __kvfCompleteDevice(VkPhysicalDevice physical, VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(physical != VK_NULL_HANDLE); + + __KvfDevice* kvf_device = NULL; + + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].physical == physical) + kvf_device = &__kvf_internal_devices[i]; + } + + KVF_ASSERT(kvf_device != NULL); + + VkCommandPool pool; + VkCommandPoolCreateInfo pool_info = {}; + pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; + pool_info.queueFamilyIndex = kvf_device->queues.graphics; + __kvfCheckVk(vkCreateCommandPool(device, &pool_info, NULL, &pool)); + + kvf_device->device = device; + kvf_device->cmd_pool = pool; + kvf_device->sets_pools = NULL; + kvf_device->sets_pools_size = 0; +} + +void 
__kvfDestroyDescriptorPools(VkDevice device); + +void __kvfDestroyDevice(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].device == device) + { + vkDestroyCommandPool(device, __kvf_internal_devices[i].cmd_pool, NULL); + __kvfDestroyDescriptorPools(device); + vkDestroyDevice(device, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_devices_size - 1; j++) + __kvf_internal_devices[j] = __kvf_internal_devices[j + 1]; + __kvf_internal_devices_size--; + if(__kvf_internal_devices_size == 0) + { + KVF_FREE(__kvf_internal_devices); + __kvf_internal_devices_capacity = 0; + } + return; + } + } +} + +__KvfDevice* __kvfGetKvfDeviceFromVkPhysicalDevice(VkPhysicalDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].physical == device) + return &__kvf_internal_devices[i]; + } + return NULL; +} + +__KvfDevice* __kvfGetKvfDeviceFromVkDevice(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].device == device) + return &__kvf_internal_devices[i]; + } + return NULL; +} + +void __kvfAddSwapchainToArray(VkSwapchainKHR swapchain, __KvfSwapchainSupportInternal support, VkFormat format, uint32_t images_count, VkExtent2D extent) +{ + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + if(__kvf_internal_swapchains_size == __kvf_internal_swapchains_capacity) + { + // Resize the dynamic array if necessary + __kvf_internal_swapchains_capacity += 2; + __kvf_internal_swapchains = (__KvfSwapchain*)KVF_REALLOC(__kvf_internal_swapchains, __kvf_internal_swapchains_capacity * sizeof(__KvfSwapchain)); + } + + __kvf_internal_swapchains[__kvf_internal_swapchains_size].swapchain = swapchain; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].support = support; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_format = format; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_count = images_count; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_extent = extent; + __kvf_internal_swapchains_size++; +} + +void __kvfDestroySwapchain(VkDevice device, VkSwapchainKHR swapchain) +{ + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + KVF_ASSERT(device != VK_NULL_HANDLE); + + for(size_t i = 0; i < __kvf_internal_swapchains_size; i++) + { + if(__kvf_internal_swapchains[i].swapchain == swapchain) + { + vkDestroySwapchainKHR(device, swapchain, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_swapchains_size - 1; j++) + __kvf_internal_swapchains[j] = __kvf_internal_swapchains[j + 1]; + __kvf_internal_swapchains_size--; + if(__kvf_internal_swapchains_size == 0) + { + KVF_FREE(__kvf_internal_swapchains); + __kvf_internal_swapchains_capacity = 0; + } + return; + } + } +} + +__KvfSwapchain* __kvfGetKvfSwapchainFromVkSwapchainKHR(VkSwapchainKHR swapchain) +{ + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_swapchains_size; i++) + { + if(__kvf_internal_swapchains[i].swapchain == swapchain) + return &__kvf_internal_swapchains[i]; + } + return NULL; +} + +void __kvfAddFramebufferToArray(VkFramebuffer framebuffer, VkExtent2D extent) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + if(__kvf_internal_framebuffers_size == __kvf_internal_framebuffers_capacity) + { + // Resize the dynamic array if necessary + 
__kvf_internal_framebuffers_capacity += 2; + __kvf_internal_framebuffers = (__KvfFramebuffer*)KVF_REALLOC(__kvf_internal_framebuffers, __kvf_internal_framebuffers_capacity * sizeof(__KvfFramebuffer)); + } + + __kvf_internal_framebuffers[__kvf_internal_framebuffers_size].framebuffer = framebuffer; + __kvf_internal_framebuffers[__kvf_internal_framebuffers_size].extent = extent; + __kvf_internal_framebuffers_size++; +} + +void __kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + KVF_ASSERT(device != VK_NULL_HANDLE); + + for(size_t i = 0; i < __kvf_internal_framebuffers_size; i++) + { + if(__kvf_internal_framebuffers[i].framebuffer == framebuffer) + { + vkDestroyFramebuffer(device, framebuffer, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_framebuffers_size - 1; j++) + __kvf_internal_framebuffers[j] = __kvf_internal_framebuffers[j + 1]; + __kvf_internal_framebuffers_size--; + if(__kvf_internal_framebuffers_size == 0) + { + KVF_FREE(__kvf_internal_framebuffers); + __kvf_internal_framebuffers_capacity = 0; + } + return; + } + } +} + +__KvfFramebuffer* __kvfGetKvfSwapchainFromVkFramebuffer(VkFramebuffer framebuffer) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_framebuffers_size; i++) + { + if(__kvf_internal_framebuffers[i].framebuffer == framebuffer) + return &__kvf_internal_framebuffers[i]; + } + return NULL; +} + +VkDescriptorPool __kvfDeviceCreateDescriptorPool(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + kvf_device->sets_pools_size++; + kvf_device->sets_pools = (__KvfDescriptorPool*)KVF_REALLOC(kvf_device->sets_pools, kvf_device->sets_pools_size * sizeof(__KvfDescriptorPool)); + memset(&kvf_device->sets_pools[kvf_device->sets_pools_size - 1], 0, sizeof(__KvfDescriptorPool)); + + VkDescriptorPoolSize pool_sizes[] = { + { VK_DESCRIPTOR_TYPE_SAMPLER, 1024 }, + { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1024 }, + { VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1024 }, + { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1024 }, + { VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1024 } + }; + + VkDescriptorPoolCreateInfo pool_info = {}; + pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + pool_info.poolSizeCount = sizeof(pool_sizes) / sizeof(VkDescriptorPoolSize); + pool_info.pPoolSizes = pool_sizes; + pool_info.maxSets = KVF_DESCRIPTOR_POOL_CAPACITY; + pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; + + __kvfCheckVk(vkCreateDescriptorPool(device, &pool_info, NULL, &kvf_device->sets_pools[kvf_device->sets_pools_size - 1].pool)); + kvf_device->sets_pools[kvf_device->sets_pools_size - 1].capacity = KVF_DESCRIPTOR_POOL_CAPACITY; + return kvf_device->sets_pools[kvf_device->sets_pools_size - 1].pool; +} + +void __kvfDestroyDescriptorPools(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + + for(size_t i = 0; i < kvf_device->sets_pools_size; i++) + vkDestroyDescriptorPool(device, kvf_device->sets_pools[i].pool, NULL); + 
KVF_FREE(kvf_device->sets_pools); + kvf_device->sets_pools_size = 0; +} + +void kvfSetErrorCallback(KvfErrorCallback callback) +{ + __kvf_error_callback = callback; +} + +void kvfSetValidationErrorCallback(KvfErrorCallback callback) +{ + __kvf_validation_error_callback = callback; +} + +void kvfSetValidationWarningCallback(KvfErrorCallback callback) +{ + __kvf_validation_warning_callback = callback; +} + +bool kvfIsStencilFormat(VkFormat format) +{ + switch(format) + { + case VK_FORMAT_D32_SFLOAT_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + return true; + + default: return false; + } +} + +bool kvfIsDepthFormat(VkFormat format) +{ + switch(format) + { + case VK_FORMAT_D16_UNORM: + case VK_FORMAT_D32_SFLOAT: + case VK_FORMAT_D32_SFLOAT_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + case VK_FORMAT_D16_UNORM_S8_UINT: + return true; + + default: return false; + } +} + +VkPipelineStageFlags kvfLayoutToAccessMask(VkImageLayout layout, bool is_destination) +{ + VkPipelineStageFlags access_mask = 0; + + switch(layout) + { + case VK_IMAGE_LAYOUT_UNDEFINED: + if(is_destination) + KVF_ASSERT(false && "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_UNDEFINED"); + break; + case VK_IMAGE_LAYOUT_GENERAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: + access_mask = VK_ACCESS_SHADER_READ_BIT; // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; + break; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_READ_BIT; break; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_PREINITIALIZED: + if(!is_destination) + access_mask = VK_ACCESS_HOST_WRITE_BIT; + else + KVF_ASSERT(false && "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_PREINITIALIZED"); + break; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: access_mask = VK_ACCESS_MEMORY_READ_BIT; break; + + default: KVF_ASSERT(false && "Vulkan : unexpected image layout"); break; + } + + return access_mask; +} + +VkPipelineStageFlags kvfAccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags) +{ + VkPipelineStageFlags stages = 0; + + while(access_flags != 0) + { + VkAccessFlagBits _access_flag = (VkAccessFlagBits)(access_flags & (~(access_flags - 1))); + if(_access_flag == 0 || (_access_flag & (_access_flag - 1)) != 0) + KVF_ASSERT(false && "Vulkan : an error has been caught during access flag to pipeline stage operation"); + access_flags &= ~_access_flag; + + switch(_access_flag) + { + case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: stages |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; break; + case VK_ACCESS_INDEX_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break; + case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: 
stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break;
+			case VK_ACCESS_UNIFORM_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
+			case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break;
+			case VK_ACCESS_SHADER_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
+			case VK_ACCESS_SHADER_WRITE_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break;
+			case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break;
+			case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break;
+			case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break;
+			case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break;
+			case VK_ACCESS_TRANSFER_READ_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break;
+			case VK_ACCESS_TRANSFER_WRITE_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break;
+			case VK_ACCESS_HOST_READ_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break;
+			case VK_ACCESS_HOST_WRITE_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break;
+			case VK_ACCESS_MEMORY_READ_BIT: break;
+			case VK_ACCESS_MEMORY_WRITE_BIT: break;
+
+			default: KVF_ASSERT(false && "Vulkan : unknown access flag"); break;
+		}
+	}
+	return stages;
+}
+
+VkFormat kvfFindSupportFormatInCandidates(VkDevice device, VkFormat* candidates, size_t candidates_count, VkImageTiling tiling, VkFormatFeatureFlags flags)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	__KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device);
+	KVF_ASSERT(kvf_device != NULL);
+	for(size_t i = 0; i < candidates_count; i++)
+	{
+		VkFormatProperties props;
+		vkGetPhysicalDeviceFormatProperties(kvf_device->physical, candidates[i], &props);
+		if(tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & flags) == flags)
+			return candidates[i];
+		else if(tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & flags) == flags)
+			return candidates[i];
+	}
+
+	KVF_ASSERT(false && "Vulkan : failed to find image format");
+	return VK_FORMAT_R8G8B8A8_SRGB; // just to avoid warning
+}
+
+uint32_t kvfFormatSize(VkFormat format)
+{
+	switch(format)
+	{
+		case VK_FORMAT_UNDEFINED: return 0;
+		case VK_FORMAT_R4G4_UNORM_PACK8: return 1;
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return 2;
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return 2;
+		case VK_FORMAT_R5G6B5_UNORM_PACK16: return 2;
+		case VK_FORMAT_B5G6R5_UNORM_PACK16: return 2;
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16: return 2;
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16: return 2;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return 2;
+		case VK_FORMAT_R8_UNORM: return 1;
+		case VK_FORMAT_R8_SNORM: return 1;
+		case VK_FORMAT_R8_USCALED: return 1;
+		case VK_FORMAT_R8_SSCALED: return 1;
+		case VK_FORMAT_R8_UINT: return 1;
+		case VK_FORMAT_R8_SINT: return 1;
+		case VK_FORMAT_R8_SRGB: return 1;
+		case VK_FORMAT_R8G8_UNORM: return 2;
+		case VK_FORMAT_R8G8_SNORM: return 2;
+		case VK_FORMAT_R8G8_USCALED: return 2;
+		case VK_FORMAT_R8G8_SSCALED: return 2;
+		case VK_FORMAT_R8G8_UINT: return 2;
+		case VK_FORMAT_R8G8_SINT: return 2;
+		case VK_FORMAT_R8G8_SRGB: return 2;
+		case VK_FORMAT_R8G8B8_UNORM: return 3;
+		case VK_FORMAT_R8G8B8_SNORM: return 3;
+		case VK_FORMAT_R8G8B8_USCALED: return 3;
+		case VK_FORMAT_R8G8B8_SSCALED: return 3;
+		case
VK_FORMAT_R8G8B8_UINT: return 3; + case VK_FORMAT_R8G8B8_SINT: return 3; + case VK_FORMAT_R8G8B8_SRGB: return 3; + case VK_FORMAT_B8G8R8_UNORM: return 3; + case VK_FORMAT_B8G8R8_SNORM: return 3; + case VK_FORMAT_B8G8R8_USCALED: return 3; + case VK_FORMAT_B8G8R8_SSCALED: return 3; + case VK_FORMAT_B8G8R8_UINT: return 3; + case VK_FORMAT_B8G8R8_SINT: return 3; + case VK_FORMAT_B8G8R8_SRGB: return 3; + case VK_FORMAT_R8G8B8A8_UNORM: return 4; + case VK_FORMAT_R8G8B8A8_SNORM: return 4; + case VK_FORMAT_R8G8B8A8_USCALED: return 4; + case VK_FORMAT_R8G8B8A8_SSCALED: return 4; + case VK_FORMAT_R8G8B8A8_UINT: return 4; + case VK_FORMAT_R8G8B8A8_SINT: return 4; + case VK_FORMAT_R8G8B8A8_SRGB: return 4; + case VK_FORMAT_B8G8R8A8_UNORM: return 4; + case VK_FORMAT_B8G8R8A8_SNORM: return 4; + case VK_FORMAT_B8G8R8A8_USCALED: return 4; + case VK_FORMAT_B8G8R8A8_SSCALED: return 4; + case VK_FORMAT_B8G8R8A8_UINT: return 4; + case VK_FORMAT_B8G8R8A8_SINT: return 4; + case VK_FORMAT_B8G8R8A8_SRGB: return 4; + case VK_FORMAT_A8B8G8R8_UNORM_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SNORM_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_USCALED_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SSCALED_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_UINT_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SINT_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SNORM_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_USCALED_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SSCALED_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_UINT_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SINT_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_SNORM_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_USCALED_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_SSCALED_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_UINT_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_SINT_PACK32: return 4; + case VK_FORMAT_R16_UNORM: return 2; + case VK_FORMAT_R16_SNORM: return 2; + case VK_FORMAT_R16_USCALED: return 2; + case VK_FORMAT_R16_SSCALED: return 2; + case VK_FORMAT_R16_UINT: return 2; + case VK_FORMAT_R16_SINT: return 2; + case VK_FORMAT_R16_SFLOAT: return 2; + case VK_FORMAT_R16G16_UNORM: return 4; + case VK_FORMAT_R16G16_SNORM: return 4; + case VK_FORMAT_R16G16_USCALED: return 4; + case VK_FORMAT_R16G16_SSCALED: return 4; + case VK_FORMAT_R16G16_UINT: return 4; + case VK_FORMAT_R16G16_SINT: return 4; + case VK_FORMAT_R16G16_SFLOAT: return 4; + case VK_FORMAT_R16G16B16_UNORM: return 6; + case VK_FORMAT_R16G16B16_SNORM: return 6; + case VK_FORMAT_R16G16B16_USCALED: return 6; + case VK_FORMAT_R16G16B16_SSCALED: return 6; + case VK_FORMAT_R16G16B16_UINT: return 6; + case VK_FORMAT_R16G16B16_SINT: return 6; + case VK_FORMAT_R16G16B16_SFLOAT: return 6; + case VK_FORMAT_R16G16B16A16_UNORM: return 8; + case VK_FORMAT_R16G16B16A16_SNORM: return 8; + case VK_FORMAT_R16G16B16A16_USCALED: return 8; + case VK_FORMAT_R16G16B16A16_SSCALED: return 8; + case VK_FORMAT_R16G16B16A16_UINT: return 8; + case VK_FORMAT_R16G16B16A16_SINT: return 8; + case VK_FORMAT_R16G16B16A16_SFLOAT: return 8; + case VK_FORMAT_R32_UINT: return 4; + case VK_FORMAT_R32_SINT: return 4; + case VK_FORMAT_R32_SFLOAT: return 4; + case VK_FORMAT_R32G32_UINT: return 8; + case VK_FORMAT_R32G32_SINT: return 8; + case VK_FORMAT_R32G32_SFLOAT: return 8; + case VK_FORMAT_R32G32B32_UINT: return 12; + case VK_FORMAT_R32G32B32_SINT: return 12; + case 
VK_FORMAT_R32G32B32_SFLOAT: return 12; + case VK_FORMAT_R32G32B32A32_UINT: return 16; + case VK_FORMAT_R32G32B32A32_SINT: return 16; + case VK_FORMAT_R32G32B32A32_SFLOAT: return 16; + case VK_FORMAT_R64_UINT: return 8; + case VK_FORMAT_R64_SINT: return 8; + case VK_FORMAT_R64_SFLOAT: return 8; + case VK_FORMAT_R64G64_UINT: return 16; + case VK_FORMAT_R64G64_SINT: return 16; + case VK_FORMAT_R64G64_SFLOAT: return 16; + case VK_FORMAT_R64G64B64_UINT: return 24; + case VK_FORMAT_R64G64B64_SINT: return 24; + case VK_FORMAT_R64G64B64_SFLOAT: return 24; + case VK_FORMAT_R64G64B64A64_UINT: return 32; + case VK_FORMAT_R64G64B64A64_SINT: return 32; + case VK_FORMAT_R64G64B64A64_SFLOAT: return 32; + case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4; + case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4; + + default: return 0; + } +} + +const char* kvfVerbaliseVkResult(VkResult result) +{ + switch(result) + { + case VK_SUCCESS: return "Success"; + case VK_NOT_READY: return "A fence or query has not yet completed"; + case VK_TIMEOUT: return "A wait operation has not completed in the specified time"; + case VK_EVENT_SET: return "An event is signaled"; + case VK_EVENT_RESET: return "An event is unsignaled"; + case VK_INCOMPLETE: return "A return array was too small for the result"; + case VK_ERROR_OUT_OF_HOST_MEMORY: return "A host memory allocation has failed"; + case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "A device memory allocation has failed"; + case VK_ERROR_INITIALIZATION_FAILED: return "Initialization of an object could not be completed for implementation-specific reasons"; + case VK_ERROR_DEVICE_LOST: return "The logical or physical device has been lost"; + case VK_ERROR_MEMORY_MAP_FAILED: return "Mapping of a memory object has failed"; + case VK_ERROR_LAYER_NOT_PRESENT: return "A requested layer is not present or could not be loaded"; + case VK_ERROR_EXTENSION_NOT_PRESENT: return "A requested extension is not supported"; + case VK_ERROR_FEATURE_NOT_PRESENT: return "A requested feature is not supported"; + case VK_ERROR_INCOMPATIBLE_DRIVER: return "The requested version of Vulkan is not supported by the driver or is otherwise incompatible"; + case VK_ERROR_TOO_MANY_OBJECTS: return "Too many objects of the type have already been created"; + case VK_ERROR_FORMAT_NOT_SUPPORTED: return "A requested format is not supported on this device"; + case VK_ERROR_SURFACE_LOST_KHR: return "A surface is no longer available"; + case VK_SUBOPTIMAL_KHR: return "A swapchain no longer matches the surface properties exactly, but can still be used"; + case VK_ERROR_OUT_OF_DATE_KHR: return "A surface has changed in such a way that it is no longer compatible with the swapchain"; + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "The display used by a swapchain does not use the same presentable image layout"; + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "The requested window is already connected to a VkSurfaceKHR, or to some other non-Vulkan API"; + case VK_ERROR_VALIDATION_FAILED_EXT: return "A validation layer found an error"; + + default: return "Unknown Vulkan error"; + } + return NULL; // just to avoid warnings +} + +#ifdef KVF_ENABLE_VALIDATION_LAYERS + bool __kvfCheckValidationLayerSupport() + { + uint32_t layer_count; + vkEnumerateInstanceLayerProperties(&layer_count, NULL); + VkLayerProperties* available_layers = (VkLayerProperties*)KVF_MALLOC(sizeof(VkLayerProperties) * layer_count); + vkEnumerateInstanceLayerProperties(&layer_count, available_layers); + for(size_t i = 0; i < __kvf_extra_layers_count; i++) + { + 
bool found = false; + for(size_t j = 0; j < layer_count; j++) + { + if(strcmp(available_layers[j].layerName, __kvf_extra_layers[i]) == 0) + { + found = true; + break; + } + } + if(!found) + { + KVF_FREE(available_layers); + return false; + } + } + KVF_FREE(available_layers); + return true; + } + + VKAPI_ATTR VkBool32 VKAPI_CALL __kvfDebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData) + { + if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) + { + if(__kvf_validation_error_callback != NULL) + { + char buffer[4096]; + snprintf(buffer, 4096, "KVF Vulkan validation error : %s", pCallbackData->pMessage); + __kvf_validation_error_callback(buffer); + return VK_FALSE; + } + fprintf(stderr, "\nKVF Vulkan validation error : %s\n", pCallbackData->pMessage); + } + else if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) + { + if(__kvf_validation_warning_callback != NULL) + { + char buffer[4096]; + snprintf(buffer, 4096, "KVF Vulkan validation warning : %s", pCallbackData->pMessage); + __kvf_validation_warning_callback(buffer); + return VK_FALSE; + } + fprintf(stderr, "\nKVF Vulkan validation warning : %s\n", pCallbackData->pMessage); + } + return VK_FALSE; + } + + void __kvfPopulateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT* create_info) + { + create_info->sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + create_info->messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + create_info->messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + create_info->pfnUserCallback = __kvfDebugCallback; + } + + VkResult __kvfCreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* create_info, VkDebugUtilsMessengerEXT* messenger) + { + PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"); + return func ? 
func(instance, create_info, NULL, messenger) : VK_ERROR_EXTENSION_NOT_PRESENT;
+	}
+
+	void __kvfInitValidationLayers(VkInstance instance)
+	{
+		uint32_t extension_count;
+		vkEnumerateInstanceExtensionProperties(NULL, &extension_count, NULL);
+		VkExtensionProperties* extensions = (VkExtensionProperties*)KVF_MALLOC(extension_count * sizeof(VkExtensionProperties));
+		vkEnumerateInstanceExtensionProperties(NULL, &extension_count, extensions);
+		bool extension_found = false;
+		for(uint32_t i = 0; i < extension_count; i++)
+		{
+			if(strcmp(extensions[i].extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0)
+			{
+				extension_found = true;
+				break;
+			}
+		}
+		KVF_FREE(extensions); // free in every path; the list is not needed past this point
+		if(!extension_found)
+		{
+			if(__kvf_validation_warning_callback != NULL)
+			{
+				char buffer[1024];
+				snprintf(buffer, 1024, "KVF Vulkan warning: %s is not present; cannot enable validation layers", VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+				__kvf_validation_warning_callback(buffer);
+				return;
+			}
+			printf("KVF Vulkan warning: %s is not present; cannot enable validation layers", VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+			return;
+		}
+		VkDebugUtilsMessengerCreateInfoEXT create_info = {};
+		__kvfPopulateDebugMessengerCreateInfo(&create_info);
+		__kvfCheckVk(__kvfCreateDebugUtilsMessengerEXT(instance, &create_info, &__kvf_debug_messenger));
+	}
+
+	void __kvfDestroyDebugUtilsMessengerEXT(VkInstance instance)
+	{
+		PFN_vkDestroyDebugUtilsMessengerEXT func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkDestroyDebugUtilsMessengerEXT");
+		if(func)
+			func(instance, __kvf_debug_messenger, NULL);
+	}
+#endif // KVF_ENABLE_VALIDATION_LAYERS
+
+void kvfAddLayer(const char* layer)
+{
+	#ifdef KVF_ENABLE_VALIDATION_LAYERS
+		__kvf_extra_layers = (char**)KVF_REALLOC(__kvf_extra_layers, sizeof(char*) * (__kvf_extra_layers_count + 1));
+		KVF_ASSERT(__kvf_extra_layers != NULL);
+		__kvf_extra_layers[__kvf_extra_layers_count] = (char*)KVF_MALLOC(strlen(layer) + 1);
+		KVF_ASSERT(__kvf_extra_layers[__kvf_extra_layers_count] != NULL);
+		strcpy(__kvf_extra_layers[__kvf_extra_layers_count], layer);
+		__kvf_extra_layers_count++;
+	#else
+	if(__kvf_validation_error_callback != NULL)
+	{
+		char buffer[4096];
+		snprintf(buffer, 4096, "KVF Vulkan validation error : cannot add extra layers, validation layers are not enabled. Try adding #define KVF_ENABLE_VALIDATION_LAYERS");
+		__kvf_validation_error_callback(buffer);
+		return;
+	}
+	fprintf(stderr, "KVF Vulkan validation error : cannot add extra layers, validation layers are not enabled. Try adding #define KVF_ENABLE_VALIDATION_LAYERS");
+	#endif
+}
+
+VkInstance kvfCreateInstance(const char** extensions_enabled, uint32_t extensions_count)
+{
+	VkInstance instance = VK_NULL_HANDLE;
+
+	VkInstanceCreateInfo create_info = {};
+	create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+	create_info.pApplicationInfo = NULL;
+	create_info.flags = 0;
+	create_info.enabledExtensionCount = extensions_count;
+	create_info.ppEnabledExtensionNames = extensions_enabled;
+	create_info.enabledLayerCount = 0;
+	create_info.ppEnabledLayerNames = NULL;
+	create_info.pNext = NULL;
+
+#ifdef KVF_ENABLE_VALIDATION_LAYERS
+	kvfAddLayer("VK_LAYER_KHRONOS_validation");
+	const char** new_extension_set = NULL;
+	VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {}; // kept at function scope so the pNext chain is still valid when vkCreateInstance runs below
+	if(__kvfCheckValidationLayerSupport())
+	{
+		__kvfPopulateDebugMessengerCreateInfo(&debug_create_info);
+		new_extension_set = (const char**)KVF_MALLOC(sizeof(char*) * (extensions_count + 1));
+		memcpy(new_extension_set, extensions_enabled, sizeof(char*) * extensions_count);
+		new_extension_set[extensions_count] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+
+		create_info.enabledExtensionCount = extensions_count + 1;
+		create_info.ppEnabledExtensionNames = new_extension_set;
+		create_info.enabledLayerCount = __kvf_extra_layers_count;
+		create_info.ppEnabledLayerNames = (const char* const*)__kvf_extra_layers;
+		create_info.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debug_create_info;
+	}
+#endif
+
+	__kvfCheckVk(vkCreateInstance(&create_info, NULL, &instance));
+#ifdef KVF_ENABLE_VALIDATION_LAYERS
+	KVF_FREE(new_extension_set);
+	__kvfInitValidationLayers(instance);
+#endif
+	return instance;
+}
+
+void kvfDestroyInstance(VkInstance instance)
+{
+	if(instance == VK_NULL_HANDLE)
+		return;
+#ifdef KVF_ENABLE_VALIDATION_LAYERS
+	__kvfDestroyDebugUtilsMessengerEXT(instance);
+	for(size_t i = 0; i < __kvf_extra_layers_count; i++)
+		KVF_FREE(__kvf_extra_layers[i]);
+	KVF_FREE(__kvf_extra_layers);
+	__kvf_extra_layers_count = 0;
+#endif
+	vkDestroyInstance(instance, NULL);
+}
+
+VkPhysicalDevice kvfPickFirstPhysicalDevice(VkInstance instance)
+{
+	uint32_t device_count;
+	VkPhysicalDevice* devices = NULL;
+	VkPhysicalDevice chosen_one = VK_NULL_HANDLE;
+
+	KVF_ASSERT(instance != VK_NULL_HANDLE);
+
+	vkEnumeratePhysicalDevices(instance, &device_count, NULL);
+	devices = (VkPhysicalDevice*)KVF_MALLOC(sizeof(VkPhysicalDevice) * device_count + 1);
+	vkEnumeratePhysicalDevices(instance, &device_count, devices);
+	chosen_one = devices[0];
+	KVF_FREE(devices);
+	return chosen_one;
+}
+
+__KvfQueueFamilies __kvfFindQueueFamilies(VkPhysicalDevice physical, VkSurfaceKHR surface)
+{
+	__KvfQueueFamilies queues = { -1, -1, -1 };
+	uint32_t queue_family_count;
+	vkGetPhysicalDeviceQueueFamilyProperties(physical, &queue_family_count, NULL);
+	VkQueueFamilyProperties* queue_families = (VkQueueFamilyProperties*)KVF_MALLOC(sizeof(VkQueueFamilyProperties) * queue_family_count);
+	vkGetPhysicalDeviceQueueFamilyProperties(physical, &queue_family_count, queue_families);
+
+	for(int i = 0; i < queue_family_count; i++)
+	{
+		// try to find a queue family index that supports compute but not graphics
+		if(queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT && (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0)
+			queues.compute = i;
+		else if(queues.compute != -1 && queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) // else just find a compute queue
+			queues.compute = i;
+		if(queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
+			queues.graphics = i;
+
VkBool32 present_support = false; + vkGetPhysicalDeviceSurfaceSupportKHR(physical, i, surface, &present_support); + if(present_support) + queues.present = i; + + if(queues.graphics != -1 && queues.present != -1 && queues.compute != -1) + break; + } + KVF_FREE(queue_families); + return queues; +} + +VkPhysicalDevice kvfPickGoodDefaultPhysicalDevice(VkInstance instance, VkSurfaceKHR surface) +{ + const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + return kvfPickGoodPhysicalDevice(instance, surface, extensions, sizeof(extensions) / sizeof(extensions[0])); +} + +int32_t __kvfScorePhysicalDevice(VkPhysicalDevice device, VkSurfaceKHR surface, const char** deviceExtensions, uint32_t deviceExtensionsCount) +{ + /* Check Extensions Support */ + uint32_t extension_count; + vkEnumerateDeviceExtensionProperties(device, NULL, &extension_count, NULL); + VkExtensionProperties* props = (VkExtensionProperties*)KVF_MALLOC(sizeof(VkExtensionProperties) * extension_count + 1); + vkEnumerateDeviceExtensionProperties(device, NULL, &extension_count, props); + + bool are_there_required_device_extensions = true; + for(int j = 0; j < deviceExtensionsCount; j++) + { + bool is_there_extension = false; + for(int k = 0; k < extension_count; k++) + { + if(strcmp(deviceExtensions[j], props[k].extensionName) == 0) + { + is_there_extension = true; + break; + } + } + if(is_there_extension == false) + { + are_there_required_device_extensions = false; + break; + } + } + KVF_FREE(props); + if(are_there_required_device_extensions == false) + return -1; + + /* Check Queue Families Support */ + __KvfQueueFamilies queues = __kvfFindQueueFamilies(device, surface); + if(queues.graphics == -1 || queues.present == -1) + return -1; + + /* Check Surface Formats Counts */ + uint32_t format_count; + vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &format_count, NULL); + if(format_count == 0) + return -1; + + VkPhysicalDeviceProperties device_props; + vkGetPhysicalDeviceProperties(device, &device_props); + + VkPhysicalDeviceFeatures device_features; + vkGetPhysicalDeviceFeatures(device, &device_features); + + int32_t score = -1; + if(device_props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) + score += 1000; + + if(!device_features.geometryShader) + return -1; + + score += device_props.limits.maxImageDimension2D; + score += device_props.limits.maxBoundDescriptorSets; + + return score; +} + +VkPhysicalDevice kvfPickGoodPhysicalDevice(VkInstance instance, VkSurfaceKHR surface, const char** deviceExtensions, uint32_t deviceExtensionsCount) +{ + VkPhysicalDevice* devices = NULL; + VkPhysicalDevice chosen_one = VK_NULL_HANDLE; + uint32_t device_count; + int32_t best_device_score = -1; + + KVF_ASSERT(instance != VK_NULL_HANDLE); + KVF_ASSERT(surface != VK_NULL_HANDLE); + + vkEnumeratePhysicalDevices(instance, &device_count, NULL); + devices = (VkPhysicalDevice*)KVF_MALLOC(sizeof(VkPhysicalDevice) * device_count + 1); + vkEnumeratePhysicalDevices(instance, &device_count, devices); + + for(int i = 0; i < device_count; i++) + { + int32_t current_device_score = __kvfScorePhysicalDevice(devices[i], surface, deviceExtensions, deviceExtensionsCount); + if(current_device_score > best_device_score) + { + best_device_score = current_device_score; + chosen_one = devices[i]; + } + } + KVF_FREE(devices); + if(chosen_one != VK_NULL_HANDLE) + { + __KvfQueueFamilies queues = __kvfFindQueueFamilies(chosen_one, surface); + __kvfAddDeviceToArray(chosen_one, queues.graphics, queues.present); + return chosen_one; + } + return 
VK_NULL_HANDLE; +} + +VkDevice kvfCreateDefaultDevice(VkPhysicalDevice physical) +{ + const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + return kvfCreateDevice(physical, extensions, sizeof(extensions) / sizeof(extensions[0])); +} + +VkDevice kvfCreateDevice(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count) +{ + const float queue_priority = 1.0f; + + __KvfDevice* kvfdevice = __kvfGetKvfDeviceFromVkPhysicalDevice(physical); + + KVF_ASSERT(kvfdevice != NULL); + KVF_ASSERT(kvfdevice->queues.graphics != -1); + KVF_ASSERT(kvfdevice->queues.present != -1); + + VkDeviceQueueCreateInfo queue_create_info[2]; + queue_create_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_info[0].queueFamilyIndex = kvfdevice->queues.graphics; + queue_create_info[0].queueCount = 1; + queue_create_info[0].pQueuePriorities = &queue_priority; + queue_create_info[0].flags = 0; + queue_create_info[0].pNext = NULL; + queue_create_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_info[1].queueFamilyIndex = kvfdevice->queues.present; + queue_create_info[1].queueCount = 1; + queue_create_info[1].pQueuePriorities = &queue_priority; + queue_create_info[1].flags = 0; + queue_create_info[1].pNext = NULL; + + VkPhysicalDeviceFeatures device_features = { VK_FALSE }; + + VkDeviceCreateInfo createInfo; + createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + createInfo.queueCreateInfoCount = (kvfdevice->queues.graphics == kvfdevice->queues.present ? 1 : 2); + createInfo.pQueueCreateInfos = queue_create_info; + createInfo.pEnabledFeatures = &device_features; + createInfo.enabledExtensionCount = extensions_count; + createInfo.ppEnabledExtensionNames = extensions; + createInfo.enabledLayerCount = 0; + createInfo.ppEnabledLayerNames = NULL; + createInfo.flags = 0; + createInfo.pNext = NULL; + + VkDevice device; + __kvfCheckVk(vkCreateDevice(physical, &createInfo, NULL, &device)); + __kvfCompleteDevice(physical, device); + + return device; +} + +void kvfDestroyDevice(VkDevice device) +{ + if(device == VK_NULL_HANDLE) + return; + __kvfDestroyDevice(device); +} + +VkQueue kvfGetDeviceQueue(VkDevice device, KvfQueueType queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvfdevice = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvfdevice != NULL); + VkQueue vk_queue = VK_NULL_HANDLE; + if(queue == KVF_GRAPHICS_QUEUE) + vkGetDeviceQueue(device, kvfdevice->queues.graphics, 0, &vk_queue); + else if(queue == KVF_PRESENT_QUEUE) + vkGetDeviceQueue(device, kvfdevice->queues.present, 0, &vk_queue); + else if(queue == KVF_COMPUTE_QUEUE) + vkGetDeviceQueue(device, kvfdevice->queues.compute, 0, &vk_queue); + return vk_queue; +} + +uint32_t kvfGetDeviceQueueFamily(VkDevice device, KvfQueueType queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvfdevice = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvfdevice != NULL); + VkQueue vk_queue = VK_NULL_HANDLE; + if(queue == KVF_GRAPHICS_QUEUE) + return kvfdevice->queues.graphics; + else if(queue == KVF_PRESENT_QUEUE) + return kvfdevice->queues.present; + else if(queue == KVF_COMPUTE_QUEUE) + return kvfdevice->queues.compute; + KVF_ASSERT(false && "invalid queue"); + return 0; +} + +bool kvfQueuePresentKHR(VkDevice device, VkSemaphore wait, VkSwapchainKHR swapchain, uint32_t image_index) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkPresentInfoKHR present_info = {}; + present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; + present_info.waitSemaphoreCount = 1; + 
present_info.pWaitSemaphores = &wait; + present_info.swapchainCount = 1; + present_info.pSwapchains = &swapchain; + present_info.pImageIndices = &image_index; + VkResult result = vkQueuePresentKHR(kvfGetDeviceQueue(device, KVF_PRESENT_QUEUE), &present_info); + if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) + return false; + else + __kvfCheckVk(result); + return true; +} + +VkFence kvfCreateFence(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkFenceCreateInfo fence_info = {}; + fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT; + VkFence fence; + __kvfCheckVk(vkCreateFence(device, &fence_info, NULL, &fence)); + return fence; +} + +void kvfWaitForFence(VkDevice device, VkFence fence) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(fence != VK_NULL_HANDLE); + vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); +} + +void kvfDestroyFence(VkDevice device, VkFence fence) +{ + if(fence == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroyFence(device, fence, NULL); +} + +VkSemaphore kvfCreateSemaphore(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkSemaphoreCreateInfo semaphore_info = {}; + semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; + VkSemaphore semaphore; + __kvfCheckVk(vkCreateSemaphore(device, &semaphore_info, NULL, &semaphore)); + return semaphore; +} + +void kvfDestroySemaphore(VkDevice device, VkSemaphore semaphore) +{ + if(semaphore == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroySemaphore(device, semaphore, NULL); +} + +__KvfSwapchainSupportInternal __kvfQuerySwapchainSupport(VkPhysicalDevice physical, VkSurfaceKHR surface) +{ + __KvfSwapchainSupportInternal support; + + __kvfCheckVk(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical, surface, &support.capabilities)); + + vkGetPhysicalDeviceSurfaceFormatsKHR(physical, surface, &support.formatsCount, NULL); + if(support.formatsCount != 0) + { + support.formats = (VkSurfaceFormatKHR*)KVF_MALLOC(sizeof(VkSurfaceFormatKHR) * support.formatsCount); + vkGetPhysicalDeviceSurfaceFormatsKHR(physical, surface, &support.formatsCount, support.formats); + } + + vkGetPhysicalDeviceSurfacePresentModesKHR(physical, surface, &support.presentModesCount, NULL); + if(support.presentModesCount != 0) + { + support.presentModes = (VkPresentModeKHR*)KVF_MALLOC(sizeof(VkPresentModeKHR) * support.presentModesCount); + vkGetPhysicalDeviceSurfacePresentModesKHR(physical, surface, &support.presentModesCount, support.presentModes); + } + return support; +} + +VkSurfaceFormatKHR __kvfChooseSwapSurfaceFormat(__KvfSwapchainSupportInternal* support) +{ + for(int i = 0; i < support->formatsCount; i++) + { + if(support->formats[i].format == VK_FORMAT_R8G8B8A8_SRGB && support->formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) + return support->formats[i]; + } + return support->formats[0]; +} + +VkPresentModeKHR __kvfChooseSwapPresentMode(__KvfSwapchainSupportInternal* support, bool tryVsync) +{ + if(tryVsync == false) + return VK_PRESENT_MODE_IMMEDIATE_KHR; + for(int i = 0; i < support->presentModesCount; i++) + { + if(support->presentModes[i] == VK_PRESENT_MODE_MAILBOX_KHR) + return support->presentModes[i]; + } + return VK_PRESENT_MODE_FIFO_KHR; +} + +uint32_t __kvfClamp(uint32_t i, uint32_t min, uint32_t max) +{ + const uint32_t t = i < min ? min : i; + return t > max ? 
max : t; +} + +VkSwapchainKHR kvfCreateSwapchainKHR(VkDevice device, VkPhysicalDevice physical, VkSurfaceKHR surface, VkExtent2D extent, bool tryVsync) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkSwapchainKHR swapchain; + __KvfSwapchainSupportInternal support = __kvfQuerySwapchainSupport(physical, surface); + + VkSurfaceFormatKHR surfaceFormat = __kvfChooseSwapSurfaceFormat(&support); + VkPresentModeKHR presentMode = __kvfChooseSwapPresentMode(&support, tryVsync); + + uint32_t imageCount = support.capabilities.minImageCount + 1; + if(support.capabilities.maxImageCount > 0 && imageCount > support.capabilities.maxImageCount) + imageCount = support.capabilities.maxImageCount; + + __KvfDevice* kvfdevice = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvfdevice != NULL); + + uint32_t queueFamilyIndices[] = { (uint32_t)kvfdevice->queues.graphics, (uint32_t)kvfdevice->queues.present }; + + if(support.capabilities.currentExtent.width != UINT32_MAX) + extent = support.capabilities.currentExtent; + else + { + extent.width = __kvfClamp(extent.width, support.capabilities.minImageExtent.width, support.capabilities.maxImageExtent.width); + extent.height = __kvfClamp(extent.height, support.capabilities.minImageExtent.height, support.capabilities.maxImageExtent.height); + } + + VkSwapchainCreateInfoKHR createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; + createInfo.surface = surface; + createInfo.minImageCount = imageCount; + createInfo.imageFormat = surfaceFormat.format; + createInfo.imageColorSpace = surfaceFormat.colorSpace; + createInfo.imageExtent = extent; + createInfo.imageArrayLayers = 1; + createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + createInfo.preTransform = support.capabilities.currentTransform; + createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + createInfo.presentMode = presentMode; + createInfo.clipped = VK_TRUE; + createInfo.oldSwapchain = VK_NULL_HANDLE; + + if(kvfdevice->queues.graphics != kvfdevice->queues.present) + { + createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + createInfo.queueFamilyIndexCount = 2; + createInfo.pQueueFamilyIndices = queueFamilyIndices; + } + else + createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + + __kvfCheckVk(vkCreateSwapchainKHR(device, &createInfo, NULL, &swapchain)); + + uint32_t images_count; + vkGetSwapchainImagesKHR(device, swapchain, (uint32_t*)&images_count, NULL); + + __kvfAddSwapchainToArray(swapchain, support, surfaceFormat.format, images_count, extent); + + return swapchain; +} + +VkFormat kvfGetSwapchainImagesFormat(VkSwapchainKHR swapchain) +{ + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_format; +} + +uint32_t kvfGetSwapchainImagesCount(VkSwapchainKHR swapchain) +{ + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_count; +} + +uint32_t kvfGetSwapchainMinImagesCount(VkSwapchainKHR swapchain) +{ + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->support.capabilities.minImageCount; +} + +VkExtent2D kvfGetSwapchainImagesSize(VkSwapchainKHR swapchain) +{ + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_extent; +} + +void kvfDestroySwapchainKHR(VkDevice 
device, VkSwapchainKHR swapchain) +{ + if(swapchain == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + __kvfDestroySwapchain(device, swapchain); +} + +VkImage kvfCreateImage(VkDevice device, uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkImageCreateInfo image_info = {}; + image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_info.imageType = VK_IMAGE_TYPE_2D; + image_info.extent.width = width; + image_info.extent.height = height; + image_info.extent.depth = 1; + image_info.mipLevels = 1; + image_info.arrayLayers = 1; + image_info.format = format; + image_info.tiling = tiling; + image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + image_info.usage = usage; + image_info.samples = VK_SAMPLE_COUNT_1_BIT; + image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + VkImage image; + __kvfCheckVk(vkCreateImage(device, &image_info, NULL, &image)); + return image; +} + +void kvfDestroyImage(VkDevice device, VkImage image) +{ + if(image == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroyImage(device, image, NULL); +} + +VkImageView kvfCreateImageView(VkDevice device, VkImage image, VkFormat format, VkImageViewType type, VkImageAspectFlags aspect) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkImageViewCreateInfo create_info = {}; + create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + create_info.image = image; + create_info.viewType = type; + create_info.format = format; + create_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.subresourceRange.aspectMask = aspect; + create_info.subresourceRange.baseMipLevel = 0; + create_info.subresourceRange.levelCount = 1; + create_info.subresourceRange.baseArrayLayer = 0; + create_info.subresourceRange.layerCount = 1; + VkImageView view; + __kvfCheckVk(vkCreateImageView(device, &create_info, NULL, &view)); + return view; +} + +void kvfDestroyImageView(VkDevice device, VkImageView image_view) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(image_view != VK_NULL_HANDLE); + vkDestroyImageView(device, image_view, NULL); +} + +void kvfTransitionImageLayout(VkDevice device, VkImage image, VkCommandBuffer cmd, VkFormat format, VkImageLayout old_layout, VkImageLayout new_layout, bool is_single_time_cmd_buffer) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + + if(new_layout == old_layout) + return; + + if(is_single_time_cmd_buffer) + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + + VkImageMemoryBarrier barrier = {}; + barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = old_layout; + barrier.newLayout = new_layout; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = kvfIsDepthFormat(format) ? 
VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
+	barrier.subresourceRange.baseMipLevel = 0;
+	barrier.subresourceRange.levelCount = 1;
+	barrier.subresourceRange.baseArrayLayer = 0;
+	barrier.subresourceRange.layerCount = 1;
+	barrier.srcAccessMask = kvfLayoutToAccessMask(old_layout, false);
+	barrier.dstAccessMask = kvfLayoutToAccessMask(new_layout, true);
+	if(kvfIsStencilFormat(format))
+		barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+	VkPipelineStageFlags source_stage = 0;
+	if(barrier.oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)
+		source_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+	else if(barrier.srcAccessMask != 0)
+		source_stage = kvfAccessFlagsToPipelineStage(barrier.srcAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
+	else
+		source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+
+	VkPipelineStageFlags destination_stage = 0;
+	if(barrier.newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)
+		destination_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+	else if(barrier.dstAccessMask != 0)
+		destination_stage = kvfAccessFlagsToPipelineStage(barrier.dstAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
+	else
+		destination_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+
+	vkCmdPipelineBarrier(cmd, source_stage, destination_stage, 0, 0, NULL, 0, NULL, 1, &barrier);
+
+	if(is_single_time_cmd_buffer)
+	{
+		kvfEndCommandBuffer(cmd);
+		VkFence fence = kvfCreateFence(device);
+		kvfSubmitSingleTimeCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, fence);
+		kvfDestroyFence(device, fence);
+	}
+}
+
+VkSampler kvfCreateSampler(VkDevice device, VkFilter filters, VkSamplerAddressMode address_modes, VkSamplerMipmapMode mipmap_mode)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	VkSamplerCreateInfo info = {};
+	info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+	info.magFilter = filters;
+	info.minFilter = filters;
+	info.mipmapMode = mipmap_mode;
+	info.addressModeU = address_modes;
+	info.addressModeV = address_modes;
+	info.addressModeW = address_modes;
+	info.minLod = -1000;
+	info.maxLod = 1000;
+	info.anisotropyEnable = VK_FALSE;
+	info.maxAnisotropy = 1.0f;
+	VkSampler sampler;
+	__kvfCheckVk(vkCreateSampler(device, &info, NULL, &sampler));
+	return sampler;
+}
+
+void kvfDestroySampler(VkDevice device, VkSampler sampler)
+{
+	if(sampler == VK_NULL_HANDLE)
+		return;
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	vkDestroySampler(device, sampler, NULL);
+}
+
+VkBuffer kvfCreateBuffer(VkDevice device, VkBufferUsageFlags usage, VkDeviceSize size)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	VkBufferCreateInfo buffer_info = {};
+	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+	buffer_info.size = size;
+	buffer_info.usage = usage;
+	buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+	VkBuffer buffer;
+	__kvfCheckVk(vkCreateBuffer(device, &buffer_info, NULL, &buffer));
+	return buffer;
+}
+
+void kvfCopyBufferToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkBuffer src, size_t size)
+{
+	KVF_ASSERT(cmd != VK_NULL_HANDLE);
+	KVF_ASSERT(dst != VK_NULL_HANDLE);
+	KVF_ASSERT(src != VK_NULL_HANDLE);
+	VkBufferCopy copy_region = {};
+	copy_region.size = size;
+	vkCmdCopyBuffer(cmd, src, dst, 1, &copy_region);
+}
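+
+/* Usage sketch (editor's illustration, not part of the upstream patch):
+   recording a staging copy between two buffers. `device`, `cmd` and `size`
+   are assumed, and memory allocation/binding is outside KVF's scope here.
+
+       VkBuffer staging = kvfCreateBuffer(device, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, size);
+       VkBuffer local = kvfCreateBuffer(device, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, size);
+       // ... allocate, bind and fill the buffers' memory, then record:
+       kvfCopyBufferToBuffer(cmd, local, staging, size);
+*/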
+
+void kvfCopyBufferToImage(VkCommandBuffer cmd, VkImage dst, VkBuffer src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent)
+{
+	KVF_ASSERT(cmd != VK_NULL_HANDLE);
+	KVF_ASSERT(dst != VK_NULL_HANDLE);
+	KVF_ASSERT(src != VK_NULL_HANDLE);
+	VkOffset3D offset = { 0, 0, 0 };
+	VkBufferImageCopy region = {};
+	region.bufferOffset = buffer_offset;
+	region.bufferRowLength = 0;
+	region.bufferImageHeight = 0;
+	region.imageSubresource.aspectMask = aspect;
+	region.imageSubresource.mipLevel = 0;
+	region.imageSubresource.baseArrayLayer = 0;
+	region.imageSubresource.layerCount = 1;
+	region.imageOffset = offset;
+	region.imageExtent = extent;
+	vkCmdCopyBufferToImage(cmd, src, dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+}
+
+void kvfDestroyBuffer(VkDevice device, VkBuffer buffer)
+{
+	if(buffer == VK_NULL_HANDLE)
+		return;
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	vkDestroyBuffer(device, buffer, NULL);
+}
+
+VkFramebuffer kvfCreateFramebuffer(VkDevice device, VkRenderPass render_pass, VkImageView* image_views, size_t image_views_count, VkExtent2D extent)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	KVF_ASSERT(image_views != NULL);
+
+	VkFramebufferCreateInfo framebuffer_info = {};
+	framebuffer_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+	framebuffer_info.renderPass = render_pass;
+	framebuffer_info.attachmentCount = image_views_count;
+	framebuffer_info.pAttachments = image_views;
+	framebuffer_info.width = extent.width;
+	framebuffer_info.height = extent.height;
+	framebuffer_info.layers = 1;
+	VkFramebuffer framebuffer = VK_NULL_HANDLE;
+	__kvfCheckVk(vkCreateFramebuffer(device, &framebuffer_info, NULL, &framebuffer));
+	__kvfAddFramebufferToArray(framebuffer, extent);
+	return framebuffer;
+}
+
+VkExtent2D kvfGetFramebufferSize(VkFramebuffer buffer)
+{
+	__KvfFramebuffer* kvf_framebuffer = __kvfGetKvfSwapchainFromVkFramebuffer(buffer);
+	KVF_ASSERT(kvf_framebuffer != NULL);
+	return kvf_framebuffer->extent;
+}
+
+void kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer)
+{
+	if(framebuffer == VK_NULL_HANDLE)
+		return;
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	__kvfDestroyFramebuffer(device, framebuffer);
+}
+
+VkCommandBuffer kvfCreateCommandBuffer(VkDevice device)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	return kvfCreateCommandBufferLeveled(device, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+}
+
+VkCommandBuffer kvfCreateCommandBufferLeveled(VkDevice device, VkCommandBufferLevel level)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	__KvfDevice* kvfdevice = __kvfGetKvfDeviceFromVkDevice(device);
+	KVF_ASSERT(kvfdevice != NULL);
+
+	VkCommandPool pool = kvfdevice->cmd_pool;
+	VkCommandBuffer buffer;
+	VkCommandBufferAllocateInfo alloc_info = {};
+	alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+	alloc_info.commandPool = pool;
+	alloc_info.level = level;
+	alloc_info.commandBufferCount = 1;
+	__kvfCheckVk(vkAllocateCommandBuffers(device, &alloc_info, &buffer));
+	return buffer;
+}
+
+void kvfBeginCommandBuffer(VkCommandBuffer buffer, VkCommandBufferUsageFlags usage)
+{
+	KVF_ASSERT(buffer != VK_NULL_HANDLE);
+	VkCommandBufferBeginInfo begin_info = {};
+	begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+	begin_info.flags = usage;
+	__kvfCheckVk(vkBeginCommandBuffer(buffer, &begin_info));
+}
+
+void kvfEndCommandBuffer(VkCommandBuffer buffer)
+{
+	KVF_ASSERT(buffer != VK_NULL_HANDLE);
+	__kvfCheckVk(vkEndCommandBuffer(buffer));
+}
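+
+/* Usage sketch (editor's illustration, not part of the upstream patch):
+   recording and submitting a one-shot command buffer, mirroring what
+   kvfTransitionImageLayout does internally. `device` is assumed.
+
+       VkCommandBuffer cmd = kvfCreateCommandBuffer(device);
+       kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
+       // ... record commands ...
+       kvfEndCommandBuffer(cmd);
+       VkFence fence = kvfCreateFence(device);
+       kvfSubmitSingleTimeCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, fence);
+       kvfDestroyFence(device, fence);
+*/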
+
+void kvfSubmitCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkSemaphore signal, VkSemaphore wait, VkFence fence, VkPipelineStageFlags* stages)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+
+	VkSemaphore signal_semaphores[1];
+	VkSemaphore wait_semaphores[1];
+	signal_semaphores[0] = signal;
+	wait_semaphores[0] = wait;
+
+	if(fence != VK_NULL_HANDLE)
+		vkResetFences(device, 1, &fence);
+
+	VkSubmitInfo submit_info = {};
+	submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+	submit_info.waitSemaphoreCount = (wait == VK_NULL_HANDLE ? 0 : 1);
+	submit_info.pWaitSemaphores = wait_semaphores;
+	submit_info.pWaitDstStageMask = stages;
+	submit_info.commandBufferCount = 1;
+	submit_info.pCommandBuffers = &buffer;
+	submit_info.signalSemaphoreCount = (signal == VK_NULL_HANDLE ? 0 : 1);
+	submit_info.pSignalSemaphores = signal_semaphores;
+	__kvfCheckVk(vkQueueSubmit(kvfGetDeviceQueue(device, queue), 1, &submit_info, fence));
+}
+
+void kvfSubmitSingleTimeCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkFence fence)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+
+	if(fence != VK_NULL_HANDLE)
+		vkResetFences(device, 1, &fence);
+
+	VkSubmitInfo submit_info = {};
+	submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+	submit_info.commandBufferCount = 1;
+	submit_info.pCommandBuffers = &buffer;
+	__kvfCheckVk(vkQueueSubmit(kvfGetDeviceQueue(device, queue), 1, &submit_info, fence));
+	if(fence != VK_NULL_HANDLE)
+		vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
+}
+
+VkAttachmentDescription kvfBuildAttachmentDescription(KvfImageType type, VkFormat format, VkImageLayout initial, VkImageLayout final, bool clear)
+{
+	VkAttachmentDescription attachment = {};
+
+	switch(type)
+	{
+		case KVF_IMAGE_CUBE:
+		case KVF_IMAGE_DEPTH_ARRAY:
+		case KVF_IMAGE_COLOR:
+		case KVF_IMAGE_DEPTH:
+		{
+			attachment.format = format;
+			attachment.initialLayout = initial;
+			attachment.finalLayout = final;
+			break;
+		}
+
+		default: KVF_ASSERT(false && "KVF Attachment Description builder : unsupported image type"); break;
+	}
+
+	if(clear)
+	{
+		attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+		attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+		attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+	}
+	else
+	{
+		attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+		attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+	}
+
+	attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+	attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+	attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+	attachment.flags = 0;
+
+	return attachment;
+}
+
+VkAttachmentDescription kvfBuildSwapchainAttachmentDescription(VkSwapchainKHR swapchain, bool clear)
+{
+	__KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain);
+	KVF_ASSERT(kvf_swapchain != NULL);
+	KVF_ASSERT(kvf_swapchain->images_count != 0);
+	return kvfBuildAttachmentDescription(KVF_IMAGE_COLOR, kvf_swapchain->images_format, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, clear);
+}
+
+VkRenderPass kvfCreateRenderPass(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, VkPipelineBindPoint bind_point)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+
+	size_t color_attachment_count = 0;
+	size_t depth_attachment_count = 0;
+
+	for(size_t i = 0; i < attachments_count; i++)
+	{
+		if(kvfIsDepthFormat(attachments[i].format))
+			depth_attachment_count++;
+		else
+			color_attachment_count++;
+	}
+
+	VkAttachmentReference* color_references = NULL;
+	VkAttachmentReference* depth_references = NULL;
+
+	if(color_attachment_count != 0)
+	{
+		color_references = (VkAttachmentReference*)KVF_MALLOC(color_attachment_count * sizeof(VkAttachmentReference));
+		KVF_ASSERT(color_references != NULL);
+	}
+	if(depth_attachment_count != 0)
+	{
depth_references = (VkAttachmentReference*)KVF_MALLOC(depth_attachment_count * sizeof(VkAttachmentReference)); + KVF_ASSERT(depth_references != NULL); + } + + for(size_t i = 0, c = 0, d = 0; i < attachments_count; i++) + { + if(!kvfIsDepthFormat(attachments[i].format)) + { + VkImageLayout layout = attachments[i].finalLayout; + color_references[c].attachment = i; + color_references[c].layout = layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL : layout; + c++; + } + else + { + depth_references[d].attachment = i; + depth_references[d].layout = attachments[i].finalLayout; + d++; + } + } + + VkSubpassDescription subpass = {}; + subpass.pipelineBindPoint = bind_point; + subpass.colorAttachmentCount = color_attachment_count; + subpass.pColorAttachments = color_references; + subpass.pDepthStencilAttachment = depth_references; + + VkRenderPassCreateInfo renderpass_create_info = {}; + renderpass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + renderpass_create_info.attachmentCount = attachments_count; + renderpass_create_info.pAttachments = attachments; + renderpass_create_info.subpassCount = 1; + renderpass_create_info.pSubpasses = &subpass; + renderpass_create_info.dependencyCount = 0; + renderpass_create_info.pDependencies = NULL; + + VkRenderPass render_pass = VK_NULL_HANDLE; + __kvfCheckVk(vkCreateRenderPass(device, &renderpass_create_info, NULL, &render_pass)); + KVF_FREE(color_references); + KVF_FREE(depth_references); + return render_pass; +} + +void kvfDestroyRenderPass(VkDevice device, VkRenderPass renderPass) +{ + if(renderPass == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroyRenderPass(device, renderPass, NULL); +} + +void kvfBeginRenderPass(VkRenderPass pass, VkCommandBuffer cmd, VkFramebuffer framebuffer, VkExtent2D framebuffer_extent, VkClearValue* clears, size_t clears_count) +{ + KVF_ASSERT(pass != VK_NULL_HANDLE); + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + + VkOffset2D offset = { 0, 0 }; + VkRenderPassBeginInfo renderpass_info = {}; + renderpass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + renderpass_info.renderPass = pass; + renderpass_info.framebuffer = framebuffer; + renderpass_info.renderArea.offset = offset; + renderpass_info.renderArea.extent = framebuffer_extent; + renderpass_info.clearValueCount = clears_count; + renderpass_info.pClearValues = clears; + vkCmdBeginRenderPass(cmd, &renderpass_info, VK_SUBPASS_CONTENTS_INLINE); +} + +VkShaderModule kvfCreateShaderModule(VkDevice device, uint32_t* code, size_t size) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkShaderModuleCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + createInfo.codeSize = size * sizeof(uint32_t); + createInfo.pCode = code; + VkShaderModule shader = VK_NULL_HANDLE; + __kvfCheckVk(vkCreateShaderModule(device, &createInfo, NULL, &shader)); + return shader; +} + +void kvfDestroyShaderModule(VkDevice device, VkShaderModule shader) +{ + if(shader == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroyShaderModule(device, shader, NULL); +} + +VkDescriptorSetLayout kvfCreateDescriptorSetLayout(VkDevice device, VkDescriptorSetLayoutBinding* bindings, size_t bindings_count) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + VkDescriptorSetLayoutCreateInfo layout_info = {}; + layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + layout_info.bindingCount = bindings_count; + layout_info.pBindings = bindings; + + VkDescriptorSetLayout 
layout; + __kvfCheckVk(vkCreateDescriptorSetLayout(device, &layout_info, NULL, &layout)); + return layout; +} + +void kvfDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout layout) +{ + if(layout == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + vkDestroyDescriptorSetLayout(device, layout, NULL); +} + +VkDescriptorSet kvfAllocateDescriptorSet(VkDevice device, VkDescriptorSetLayout layout) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + VkDescriptorPool pool = VK_NULL_HANDLE; + for(int i = 0; i < kvf_device->sets_pools_size; i++) + { + if(kvf_device->sets_pools[i].size < kvf_device->sets_pools[i].capacity) + pool = kvf_device->sets_pools[i].pool; + } + if(pool == VK_NULL_HANDLE) + pool = __kvfDeviceCreateDescriptorPool(device); + KVF_ASSERT(pool != VK_NULL_HANDLE); + + VkDescriptorSet set = VK_NULL_HANDLE; + VkDescriptorSetAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + alloc_info.descriptorPool = pool; + alloc_info.descriptorSetCount = 1; + alloc_info.pSetLayouts = &layout; + __kvfCheckVk(vkAllocateDescriptorSets(device, &alloc_info, &set)); + KVF_ASSERT(set != VK_NULL_HANDLE); + return set; +} + +void kvfUpdateStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + VkWriteDescriptorSet write = kvfWriteStorageBufferToDescriptorSet(device, set, info, binding); + vkUpdateDescriptorSets(device, 1, &write, 0, NULL); +} + +void kvfUpdateUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + VkWriteDescriptorSet write = kvfWriteUniformBufferToDescriptorSet(device, set, info, binding); + vkUpdateDescriptorSets(device, 1, &write, 0, NULL); +} + +void kvfUpdateImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding) +{ + VkWriteDescriptorSet write = kvfWriteImageToDescriptorSet(device, set, info, binding); + vkUpdateDescriptorSets(device, 1, &write, 0, NULL); +} + +VkWriteDescriptorSet kvfWriteStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(set != VK_NULL_HANDLE); + VkWriteDescriptorSet descriptor_write = {}; + descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_write.dstSet = set; + descriptor_write.dstBinding = binding; + descriptor_write.dstArrayElement = 0; + descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + descriptor_write.descriptorCount = 1; + descriptor_write.pBufferInfo = info; + return descriptor_write; +} + +VkWriteDescriptorSet kvfWriteUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(set != VK_NULL_HANDLE); + VkWriteDescriptorSet descriptor_write = {}; + descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_write.dstSet = set; + descriptor_write.dstBinding = binding; + descriptor_write.dstArrayElement = 0; + descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + descriptor_write.descriptorCount = 1; + descriptor_write.pBufferInfo = info; + return descriptor_write; +} + +VkWriteDescriptorSet kvfWriteImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const 
VkDescriptorImageInfo* info, uint32_t binding)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	KVF_ASSERT(set != VK_NULL_HANDLE);
+	VkWriteDescriptorSet descriptor_write = {};
+	descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+	descriptor_write.dstSet = set;
+	descriptor_write.dstBinding = binding;
+	descriptor_write.dstArrayElement = 0;
+	descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+	descriptor_write.descriptorCount = 1;
+	descriptor_write.pImageInfo = info;
+	return descriptor_write;
+}
+
+VkPipelineLayout kvfCreatePipelineLayout(VkDevice device, VkDescriptorSetLayout* set_layouts, size_t set_layouts_count, VkPushConstantRange* pc, size_t pc_count)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	VkPipelineLayoutCreateInfo pipeline_layout_info = {};
+	pipeline_layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+	pipeline_layout_info.setLayoutCount = set_layouts_count;
+	pipeline_layout_info.pSetLayouts = set_layouts;
+	pipeline_layout_info.pushConstantRangeCount = pc_count;
+	pipeline_layout_info.pPushConstantRanges = pc;
+
+	VkPipelineLayout layout;
+	__kvfCheckVk(vkCreatePipelineLayout(device, &pipeline_layout_info, NULL, &layout));
+	return layout;
+}
+
+void kvfDestroyPipelineLayout(VkDevice device, VkPipelineLayout layout)
+{
+	if(layout == VK_NULL_HANDLE)
+		return;
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	vkDestroyPipelineLayout(device, layout, NULL);
+}
+
+void kvfResetDeviceDescriptorPools(VkDevice device)
+{
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	__KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device);
+	KVF_ASSERT(kvf_device != NULL);
+	for(int i = 0; i < kvf_device->sets_pools_size; i++)
+	{
+		vkResetDescriptorPool(device, kvf_device->sets_pools[i].pool, 0);
+		kvf_device->sets_pools[i].size = 0;
+	}
+}
+
+KvfGraphicsPipelineBuilder* kvfCreateGPipelineBuilder()
+{
+	KvfGraphicsPipelineBuilder* builder = (KvfGraphicsPipelineBuilder*)KVF_MALLOC(sizeof(KvfGraphicsPipelineBuilder));
+	memset(builder, 0, sizeof(KvfGraphicsPipelineBuilder));
+	kvfGPipelineBuilderReset(builder);
+	return builder;
+}
+
+void kvfDestroyGPipelineBuilder(KvfGraphicsPipelineBuilder* builder)
+{
+	KVF_ASSERT(builder != NULL);
+	/* free the entry-point names duplicated by kvfGPipelineBuilderAddShaderStage */
+	for(size_t i = 0; i < builder->shader_stages_count; i++)
+		KVF_FREE((char*)builder->shader_stages[i].pName);
+	KVF_FREE(builder->shader_stages);
+	if(builder->vertex_input_state.pVertexAttributeDescriptions != NULL)
+		KVF_FREE((VkVertexInputAttributeDescription*)builder->vertex_input_state.pVertexAttributeDescriptions);
+	if(builder->vertex_input_state.pVertexBindingDescriptions != NULL)
+		KVF_FREE((VkVertexInputBindingDescription*)builder->vertex_input_state.pVertexBindingDescriptions);
+	KVF_FREE(builder);
+}
+
+void kvfGPipelineBuilderReset(KvfGraphicsPipelineBuilder* builder)
+{
+	KVF_ASSERT(builder != NULL);
+	/* free the entry-point names duplicated by kvfGPipelineBuilderAddShaderStage */
+	for(size_t i = 0; i < builder->shader_stages_count; i++)
+		KVF_FREE((char*)builder->shader_stages[i].pName);
+	KVF_FREE(builder->shader_stages);
+	if(builder->vertex_input_state.pVertexAttributeDescriptions != NULL)
+		KVF_FREE((VkVertexInputAttributeDescription*)builder->vertex_input_state.pVertexAttributeDescriptions);
+	if(builder->vertex_input_state.pVertexBindingDescriptions != NULL)
+		KVF_FREE((VkVertexInputBindingDescription*)builder->vertex_input_state.pVertexBindingDescriptions);
+	memset(builder, 0, sizeof(KvfGraphicsPipelineBuilder));
+	builder->vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+	builder->input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+	builder->tessellation_state.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
+	builder->rasterization_state.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + builder->depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; +} + +void kvfGPipelineBuilderSetInputTopology(KvfGraphicsPipelineBuilder* builder, VkPrimitiveTopology topology) +{ + KVF_ASSERT(builder != NULL); + builder->input_assembly_state.topology = topology; + builder->input_assembly_state.primitiveRestartEnable = VK_FALSE; +} + +void kvfGPipelineBuilderSetPolygonMode(KvfGraphicsPipelineBuilder* builder, VkPolygonMode polygon, float line_width) +{ + KVF_ASSERT(builder != NULL); + builder->rasterization_state.polygonMode = polygon; + builder->rasterization_state.lineWidth = line_width; +} + +void kvfGPipelineBuilderSetCullMode(KvfGraphicsPipelineBuilder* builder, VkCullModeFlags cull, VkFrontFace face) +{ + KVF_ASSERT(builder != NULL); + builder->rasterization_state.cullMode = cull; + builder->rasterization_state.frontFace = face; +} + +void kvfGPipelineBuilderDisableBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_FALSE; +} + +void kvfGPipelineBuilderEnableAdditiveBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_TRUE; + builder->color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + builder->color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD; + builder->color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + builder->color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD; +} + +void kvfGPipelineBuilderEnableAlphaBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_TRUE; + builder->color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + builder->color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + builder->color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD; + builder->color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + builder->color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD; +} + +void kvfGPipelineBuilderEnableDepthTest(KvfGraphicsPipelineBuilder* builder, VkCompareOp op, bool write_enabled) +{ + KVF_ASSERT(builder != NULL); + builder->depth_stencil_state.depthTestEnable = VK_TRUE; + builder->depth_stencil_state.depthWriteEnable = write_enabled; + builder->depth_stencil_state.depthCompareOp = op; + builder->depth_stencil_state.depthBoundsTestEnable = VK_FALSE; + builder->depth_stencil_state.stencilTestEnable = VK_FALSE; + builder->depth_stencil_state.minDepthBounds = 0.f; + builder->depth_stencil_state.maxDepthBounds = 1.f; +} + +void 
kvfGPipelineBuilderDisableDepthTest(KvfGraphicsPipelineBuilder* builder)
+{
+	KVF_ASSERT(builder != NULL);
+	builder->depth_stencil_state.depthTestEnable = VK_FALSE;
+	builder->depth_stencil_state.depthWriteEnable = VK_FALSE;
+	builder->depth_stencil_state.depthCompareOp = VK_COMPARE_OP_NEVER;
+	builder->depth_stencil_state.depthBoundsTestEnable = VK_FALSE;
+	builder->depth_stencil_state.stencilTestEnable = VK_FALSE;
+	builder->depth_stencil_state.minDepthBounds = 0.f;
+	builder->depth_stencil_state.maxDepthBounds = 1.f;
+}
+
+void kvfGPipelineBuilderSetVertexInputs(KvfGraphicsPipelineBuilder* builder, VkVertexInputBindingDescription binds, VkVertexInputAttributeDescription* attributes, size_t attributes_count)
+{
+	KVF_ASSERT(builder != NULL);
+	KVF_ASSERT(attributes != NULL);
+	VkVertexInputBindingDescription* binds_ptr = (VkVertexInputBindingDescription*)KVF_MALLOC(sizeof(VkVertexInputBindingDescription));
+	KVF_ASSERT(binds_ptr != NULL);
+	*binds_ptr = binds;
+	VkVertexInputAttributeDescription* attributes_descriptions = (VkVertexInputAttributeDescription*)KVF_MALLOC(sizeof(VkVertexInputAttributeDescription) * attributes_count);
+	KVF_ASSERT(attributes_descriptions != NULL);
+	memcpy(attributes_descriptions, attributes, sizeof(VkVertexInputAttributeDescription) * attributes_count);
+	builder->vertex_input_state.vertexBindingDescriptionCount = 1;
+	builder->vertex_input_state.pVertexBindingDescriptions = binds_ptr;
+	builder->vertex_input_state.vertexAttributeDescriptionCount = attributes_count;
+	builder->vertex_input_state.pVertexAttributeDescriptions = attributes_descriptions;
+}
+
+void kvfGPipelineBuilderAddShaderStage(KvfGraphicsPipelineBuilder* builder, VkShaderStageFlagBits stage, VkShaderModule module, const char* entry)
+{
+	KVF_ASSERT(builder != NULL);
+	builder->shader_stages = (VkPipelineShaderStageCreateInfo*)KVF_REALLOC(builder->shader_stages, sizeof(VkPipelineShaderStageCreateInfo) * (builder->shader_stages_count + 1));
+	KVF_ASSERT(builder->shader_stages != NULL);
+	memset(&builder->shader_stages[builder->shader_stages_count], 0, sizeof(VkPipelineShaderStageCreateInfo));
+	char* entry_ptr = (char*)KVF_MALLOC(strlen(entry) + 1); /* + 1 for the null terminator */
+	KVF_ASSERT(entry_ptr != NULL);
+	strcpy(entry_ptr, entry);
+	builder->shader_stages[builder->shader_stages_count].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+	builder->shader_stages[builder->shader_stages_count].stage = stage;
+	builder->shader_stages[builder->shader_stages_count].module = module;
+	builder->shader_stages[builder->shader_stages_count].pName = entry_ptr;
+	builder->shader_stages_count++;
+}
+
+void kvfGPipelineBuilderResetShaderStages(KvfGraphicsPipelineBuilder* builder)
+{
+	KVF_ASSERT(builder != NULL);
+	if(builder->shader_stages == NULL)
+		return;
+
+	for(size_t i = 0; i < builder->shader_stages_count; i++)
+		KVF_FREE((char*)builder->shader_stages[i].pName);
+	KVF_FREE(builder->shader_stages);
+	builder->shader_stages = NULL;
+	builder->shader_stages_count = 0;
+}
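+
+/* Usage sketch (editor's illustration, not part of the upstream patch):
+   assembling a minimal graphics pipeline with the builder API. `device`,
+   `layout`, `pass` and the SPIR-V word arrays/counts are assumed; note that
+   kvfCreateShaderModule takes the code size in 32-bit words.
+
+       VkShaderModule vert = kvfCreateShaderModule(device, vert_spv, vert_spv_word_count);
+       VkShaderModule frag = kvfCreateShaderModule(device, frag_spv, frag_spv_word_count);
+       KvfGraphicsPipelineBuilder* builder = kvfCreateGPipelineBuilder();
+       kvfGPipelineBuilderAddShaderStage(builder, VK_SHADER_STAGE_VERTEX_BIT, vert, "main");
+       kvfGPipelineBuilderAddShaderStage(builder, VK_SHADER_STAGE_FRAGMENT_BIT, frag, "main");
+       kvfGPipelineBuilderSetInputTopology(builder, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
+       kvfGPipelineBuilderSetPolygonMode(builder, VK_POLYGON_MODE_FILL, 1.0f);
+       kvfGPipelineBuilderSetCullMode(builder, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
+       kvfGPipelineBuilderDisableBlending(builder);
+       kvfGPipelineBuilderDisableDepthTest(builder);
+       VkPipeline pipeline = kvfCreateGraphicsPipeline(device, layout, builder, pass);
+       kvfDestroyGPipelineBuilder(builder);
+*/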
+
+VkPipeline kvfCreateGraphicsPipeline(VkDevice device, VkPipelineLayout layout, KvfGraphicsPipelineBuilder* builder, VkRenderPass pass)
+{
+	KVF_ASSERT(builder != NULL);
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+
+	VkPipelineColorBlendStateCreateInfo color_blending = {};
+	color_blending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+	color_blending.logicOpEnable = VK_FALSE;
+	color_blending.logicOp = VK_LOGIC_OP_COPY;
+	color_blending.attachmentCount = 1;
+	color_blending.pAttachments = &builder->color_blend_attachment_state;
+	color_blending.blendConstants[0] = 0.0f;
+	color_blending.blendConstants[1] = 0.0f;
+	color_blending.blendConstants[2] = 0.0f;
+	color_blending.blendConstants[3] = 0.0f;
+
+	VkDynamicState states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
+
+	VkPipelineDynamicStateCreateInfo dynamic_states = {};
+	dynamic_states.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+	dynamic_states.dynamicStateCount = sizeof(states) / sizeof(VkDynamicState);
+	dynamic_states.pDynamicStates = states;
+
+	VkPipelineViewportStateCreateInfo viewport_state = {};
+	viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+	viewport_state.viewportCount = 1;
+	viewport_state.pViewports = NULL;
+	viewport_state.scissorCount = 1;
+	viewport_state.pScissors = NULL;
+
+	VkPipelineMultisampleStateCreateInfo multisampling = {};
+	multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+	multisampling.sampleShadingEnable = VK_FALSE;
+	multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+
+	VkGraphicsPipelineCreateInfo pipeline_info = {};
+	pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+	pipeline_info.stageCount = builder->shader_stages_count;
+	pipeline_info.pStages = builder->shader_stages;
+	pipeline_info.pVertexInputState = &builder->vertex_input_state;
+	pipeline_info.pInputAssemblyState = &builder->input_assembly_state;
+	pipeline_info.pViewportState = &viewport_state;
+	pipeline_info.pRasterizationState = &builder->rasterization_state;
+	pipeline_info.pMultisampleState = &multisampling;
+	pipeline_info.pColorBlendState = &color_blending;
+	pipeline_info.pDynamicState = &dynamic_states;
+	pipeline_info.layout = layout;
+	pipeline_info.renderPass = pass;
+	pipeline_info.subpass = 0;
+	pipeline_info.basePipelineHandle = VK_NULL_HANDLE;
+	pipeline_info.pDepthStencilState = &builder->depth_stencil_state;
+
+	VkPipeline pipeline;
+	__kvfCheckVk(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline));
+	return pipeline;
+}
+
+void kvfDestroyPipeline(VkDevice device, VkPipeline pipeline)
+{
+	if(pipeline == VK_NULL_HANDLE)
+		return;
+	KVF_ASSERT(device != VK_NULL_HANDLE);
+	vkDestroyPipeline(device, pipeline, NULL);
+}
+
+#endif // KVF_IMPLEMENTATION
diff --git a/third_party/vma.h b/third_party/vma.h
index 60f5720..2307325 100644
--- a/third_party/vma.h
+++ b/third_party/vma.h
@@ -1,19558 +1,18676 @@
-//
-// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-// - -#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H -#define AMD_VULKAN_MEMORY_ALLOCATOR_H - -/** \mainpage Vulkan Memory Allocator - -Version 3.0.1 (2022-05-26) - -Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n -License: MIT - -API documentation divided into groups: [Modules](modules.html) - -\section main_table_of_contents Table of contents - -- User guide - - \subpage quick_start - - [Project setup](@ref quick_start_project_setup) - - [Initialization](@ref quick_start_initialization) - - [Resource allocation](@ref quick_start_resource_allocation) - - \subpage choosing_memory_type - - [Usage](@ref choosing_memory_type_usage) - - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) - - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) - - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) - - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) - - \subpage memory_mapping - - [Mapping functions](@ref memory_mapping_mapping_functions) - - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) - - [Cache flush and invalidate](@ref memory_mapping_cache_control) - - \subpage staying_within_budget - - [Querying for budget](@ref staying_within_budget_querying_for_budget) - - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) - - \subpage resource_aliasing - - \subpage custom_memory_pools - - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) - - [Linear allocation algorithm](@ref linear_algorithm) - - [Free-at-once](@ref linear_algorithm_free_at_once) - - [Stack](@ref linear_algorithm_stack) - - [Double stack](@ref linear_algorithm_double_stack) - - [Ring buffer](@ref linear_algorithm_ring_buffer) - - \subpage defragmentation - - \subpage statistics - - [Numeric statistics](@ref statistics_numeric_statistics) - - [JSON dump](@ref statistics_json_dump) - - \subpage allocation_annotation - - [Allocation user data](@ref allocation_user_data) - - [Allocation names](@ref allocation_names) - - \subpage virtual_allocator - - \subpage debugging_memory_usage - - [Memory initialization](@ref debugging_memory_usage_initialization) - - [Margins](@ref debugging_memory_usage_margins) - - [Corruption detection](@ref debugging_memory_usage_corruption_detection) - - \subpage opengl_interop -- \subpage usage_patterns - - [GPU-only resource](@ref usage_patterns_gpu_only) - - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) - - [Readback](@ref usage_patterns_readback) - - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) - - [Other use cases](@ref usage_patterns_other_use_cases) -- \subpage configuration - - [Pointers to Vulkan functions](@ref config_Vulkan_functions) - - [Custom host memory allocator](@ref custom_memory_allocator) - - [Device memory allocation callbacks](@ref allocation_callbacks) - - [Device heap memory limit](@ref heap_memory_limit) -- Extension support - - \subpage vk_khr_dedicated_allocation - - \subpage enabling_buffer_device_address - - \subpage vk_ext_memory_priority - - \subpage vk_amd_device_coherent_memory -- \subpage general_considerations - - [Thread safety](@ref general_considerations_thread_safety) - - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) - - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) - - [Allocation algorithm](@ref general_considerations_allocation_algorithm) - - [Features 
not supported](@ref general_considerations_features_not_supported) - -\section main_see_also See also - -- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) -- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) - -\defgroup group_init Library initialization - -\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. - -\defgroup group_alloc Memory allocation - -\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. -Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). - -\defgroup group_virtual Virtual allocator - -\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm -for user-defined purpose without allocating any real GPU memory. - -\defgroup group_stats Statistics - -\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. -See documentation chapter: \ref statistics. -*/ - - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef VULKAN_H_ - #include <vulkan/vulkan.h> -#endif - -// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC, -// where AAA = major, BBB = minor, CCC = patch. -// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion. -#if !defined(VMA_VULKAN_VERSION) - #if defined(VK_VERSION_1_3) - #define VMA_VULKAN_VERSION 1003000 - #elif defined(VK_VERSION_1_2) - #define VMA_VULKAN_VERSION 1002000 - #elif defined(VK_VERSION_1_1) - #define VMA_VULKAN_VERSION 1001000 - #else - #define VMA_VULKAN_VERSION 1000000 - #endif -#endif - -#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS - extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; - extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; - extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; - extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; - extern PFN_vkAllocateMemory vkAllocateMemory; - extern PFN_vkFreeMemory vkFreeMemory; - extern PFN_vkMapMemory vkMapMemory; - extern PFN_vkUnmapMemory vkUnmapMemory; - extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; - extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; - extern PFN_vkBindBufferMemory vkBindBufferMemory; - extern PFN_vkBindImageMemory vkBindImageMemory; - extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; - extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; - extern PFN_vkCreateBuffer vkCreateBuffer; - extern PFN_vkDestroyBuffer vkDestroyBuffer; - extern PFN_vkCreateImage vkCreateImage; - extern PFN_vkDestroyImage vkDestroyImage; - extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; - #if VMA_VULKAN_VERSION >= 1001000 - extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; - extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; - extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; - extern PFN_vkBindImageMemory2 vkBindImageMemory2; - extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; - #endif // #if VMA_VULKAN_VERSION >= 1001000 -#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES - -#if !defined(VMA_DEDICATED_ALLOCATION) - #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation - 
#define VMA_DEDICATED_ALLOCATION 1 - #else - #define VMA_DEDICATED_ALLOCATION 0 - #endif -#endif - -#if !defined(VMA_BIND_MEMORY2) - #if VK_KHR_bind_memory2 - #define VMA_BIND_MEMORY2 1 - #else - #define VMA_BIND_MEMORY2 0 - #endif -#endif - -#if !defined(VMA_MEMORY_BUDGET) - #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) - #define VMA_MEMORY_BUDGET 1 - #else - #define VMA_MEMORY_BUDGET 0 - #endif -#endif - -// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. -#if !defined(VMA_BUFFER_DEVICE_ADDRESS) - #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 - #define VMA_BUFFER_DEVICE_ADDRESS 1 - #else - #define VMA_BUFFER_DEVICE_ADDRESS 0 - #endif -#endif - -// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. -#if !defined(VMA_MEMORY_PRIORITY) - #if VK_EXT_memory_priority - #define VMA_MEMORY_PRIORITY 1 - #else - #define VMA_MEMORY_PRIORITY 0 - #endif -#endif - -// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. -#if !defined(VMA_EXTERNAL_MEMORY) - #if VK_KHR_external_memory - #define VMA_EXTERNAL_MEMORY 1 - #else - #define VMA_EXTERNAL_MEMORY 0 - #endif -#endif - -// Define these macros to decorate all public functions with additional code, -// before and after returned type, appropriately. This may be useful for -// exporting the functions when compiling VMA as a separate library. Example: -// #define VMA_CALL_PRE __declspec(dllexport) -// #define VMA_CALL_POST __cdecl -#ifndef VMA_CALL_PRE - #define VMA_CALL_PRE -#endif -#ifndef VMA_CALL_POST - #define VMA_CALL_POST -#endif - -// Define this macro to decorate pointers with an attribute specifying the -// length of the array they point to if they are not null. -// -// The length may be one of -// - The name of another parameter in the argument list where the pointer is declared -// - The name of another member in the struct where the pointer is declared -// - The name of a member of a struct type, meaning the value of that member in -// the context of the call. For example -// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), -// this means the number of memory heaps available in the device associated -// with the VmaAllocator being dealt with. -#ifndef VMA_LEN_IF_NOT_NULL - #define VMA_LEN_IF_NOT_NULL(len) -#endif - -// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. -// see: https://clang.llvm.org/docs/AttributeReference.html#nullable -#ifndef VMA_NULLABLE - #ifdef __clang__ - #define VMA_NULLABLE _Nullable - #else - #define VMA_NULLABLE - #endif -#endif - -// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. 
-// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull -#ifndef VMA_NOT_NULL - #ifdef __clang__ - #define VMA_NOT_NULL _Nonnull - #else - #define VMA_NOT_NULL - #endif -#endif - -// If non-dispatchable handles are represented as pointers then we can give -// then nullability annotations -#ifndef VMA_NOT_NULL_NON_DISPATCHABLE - #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) - #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL - #else - #define VMA_NOT_NULL_NON_DISPATCHABLE - #endif -#endif - -#ifndef VMA_NULLABLE_NON_DISPATCHABLE - #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) - #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE - #else - #define VMA_NULLABLE_NON_DISPATCHABLE - #endif -#endif - -#ifndef VMA_STATS_STRING_ENABLED - #define VMA_STATS_STRING_ENABLED 1 -#endif - -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// -// -// INTERFACE -// -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// - -// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. -#ifndef _VMA_ENUM_DECLARATIONS - -/** -\addtogroup group_init -@{ -*/ - -/// Flags for created #VmaAllocator. -typedef enum VmaAllocatorCreateFlagBits -{ - /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. - - Using this flag may increase performance because internal mutexes are not used. - */ - VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, - /** \brief Enables usage of VK_KHR_dedicated_allocation extension. - - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. - When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - - Using this extension will automatically allocate dedicated blocks of memory for - some buffers and images instead of suballocating place for them out of bigger - memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT - flag) when it is recommended by the driver. It may improve performance on some - GPUs. - - You may set this flag only if you found out that following device extensions are - supported, you enabled them while creating Vulkan device passed as - VmaAllocatorCreateInfo::device, and you want them to be used internally by this - library: - - - VK_KHR_get_memory_requirements2 (device extension) - - VK_KHR_dedicated_allocation (device extension) - - When this flag is set, you can experience following warnings reported by Vulkan - validation layer. You can ignore them. - - > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. - */ - VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, - /** - Enables usage of VK_KHR_bind_memory2 extension. - - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. 
- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library. - - The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, - which allow to pass a chain of `pNext` structures while binding. - This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). - */ - VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, - /** - Enables usage of VK_EXT_memory_budget extension. - - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library, along with another instance extension - VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). - - The extension provides query for current memory usage and budget, which will probably - be more accurate than an estimation used by the library otherwise. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, - /** - Enables usage of VK_AMD_device_coherent_memory extension. - - You may set this flag only if you: - - - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, - - want it to be used internally by this library. - - The extension and accompanying device feature provide access to memory types with - `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. - They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. - - When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. - To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, - returning `VK_ERROR_FEATURE_NOT_PRESENT`. - */ - VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, - /** - Enables usage of "buffer device address" feature, which allows you to use function - `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. - - You may set this flag only if you: - - 1. (For Vulkan version < 1.2) Found as available and enabled device extension - VK_KHR_buffer_device_address. - This extension is promoted to core Vulkan 1.2. - 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. - - When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. - The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to - allocated memory blocks wherever it might be needed. - - For more information, see documentation chapter \ref enabling_buffer_device_address. - */ - VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, - /** - Enables usage of VK_EXT_memory_priority extension in the library. 
- - You may set this flag only if you found available and enabled this device extension, - along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, - while creating Vulkan device passed as VmaAllocatorCreateInfo::device. - - When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority - are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. - - A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. - Larger values are higher priority. The granularity of the priorities is implementation-dependent. - It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. - The value to be used for default priority is 0.5. - For more details, see the documentation of the VK_EXT_memory_priority extension. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, - - VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaAllocatorCreateFlagBits; -/// See #VmaAllocatorCreateFlagBits. -typedef VkFlags VmaAllocatorCreateFlags; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/// \brief Intended usage of the allocated memory. -typedef enum VmaMemoryUsage -{ - /** No intended memory usage specified. - Use other members of VmaAllocationCreateInfo to specify your requirements. - */ - VMA_MEMORY_USAGE_UNKNOWN = 0, - /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_GPU_ONLY = 1, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. - */ - VMA_MEMORY_USAGE_CPU_ONLY = 2, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_CPU_TO_GPU = 3, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. - */ - VMA_MEMORY_USAGE_GPU_TO_CPU = 4, - /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_CPU_COPY = 5, - /** - Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. - Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. - - Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. - - Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - */ - VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, - /** - Selects best memory type automatically. - This flag is recommended for most common use cases. - - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. - - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
-    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-    and not with generic memory allocation functions.
-    */
-    VMA_MEMORY_USAGE_AUTO = 7,
-    /**
-    Selects best memory type automatically with preference for GPU (device) memory.
-
-    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-    in VmaAllocationCreateInfo::flags.
-
-    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-    and not with generic memory allocation functions.
-    */
-    VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
-    /**
-    Selects best memory type automatically with preference for CPU (host) memory.
-
-    When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-    you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-    in VmaAllocationCreateInfo::flags.
-
-    It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-    vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-    and not with generic memory allocation functions.
-    */
-    VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
-
-    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
-} VmaMemoryUsage;
-
-/// Flags to be passed as VmaAllocationCreateInfo::flags.
-typedef enum VmaAllocationCreateFlagBits
-{
-    /** \brief Set this flag if the allocation should have its own memory block.
-
-    Use it for special, big resources, like fullscreen images used as attachments.
-    */
-    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
-
-    /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create a new such block.
-
-    If a new allocation cannot be placed in any of the existing blocks, the allocation
-    fails with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-
-    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
-    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
-    */
-    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
-    /** \brief Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
-
-    A pointer to the mapped memory will be returned through VmaAllocationInfo::pMappedData.
-
-    It is valid to use this flag for an allocation made from a memory type that is not
-    `HOST_VISIBLE`. This flag is then ignored and the memory is not mapped. This is
-    useful if you need an allocation that is efficient to use on the GPU
-    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
-    support it (e.g. Intel GPU).
-    */
-    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
-    /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
-
-    Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
-    null-terminated string. Instead of copying the pointer value, a local copy of the
-    string is made and stored in the allocation's `pName`. The string is automatically
-    freed together with the allocation. It is also used in vmaBuildStatsString().
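-
-    A minimal sketch of this deprecated path, assuming an existing `allocator`
-    (the buffer parameters are placeholders; vmaSetAllocationName() is the
-    preferred replacement):
-
-    \code
-    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-    bufCreateInfo.size = 65536;
-    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
-
-    VmaAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
-    allocCreateInfo.pUserData = (void*)"MyBuffer"; // Copied internally, freed with the allocation.
-
-    VkBuffer buf;
-    VmaAllocation alloc;
-    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
-    \endcode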
-    */
-    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
-    /** Allocation will be created from upper stack in a double stack pool.
-
-    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
-    */
-    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
-    /** Create both buffer/image and allocation, but don't bind them together.
-    It is useful when you want to perform the binding yourself, e.g. using some extensions.
-    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
-    Otherwise it is ignored.
-
-    If you want to make sure the new buffer/image is not tied to the new memory allocation
-    through a `VkMemoryDedicatedAllocateInfoKHR` structure, in case the allocation ends up in its own memory block,
-    also use the flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
-    */
-    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
-    /** Create the allocation only if the additional device memory required for it, if any,
-    won't exceed the memory budget. Otherwise returns `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-    */
-    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
-    /** \brief Set this flag if the allocated memory will have aliasing resources.
-
-    Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
-    Otherwise the created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
-    */
-    VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
-    /**
-    Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
-
-    - If you use #VMA_MEMORY_USAGE_AUTO or another `VMA_MEMORY_USAGE_AUTO*` value,
-      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-    - If you use any other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-      This includes allocations created in \ref custom_memory_pools.
-
-    Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
-    never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
-
-    \warning Violating this declaration may work correctly, but will likely be very slow.
-    Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
-    Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
-    */
-    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
-    /**
-    Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
-
-    - If you use #VMA_MEMORY_USAGE_AUTO or another `VMA_MEMORY_USAGE_AUTO*` value,
-      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-    - If you use any other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-      This includes allocations created in \ref custom_memory_pools.
-
-    Declares that mapped memory can be read, written, and accessed in random order,
-    so a `HOST_CACHED` memory type is required.
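-
-    A minimal sketch of a readback buffer using this flag together with
-    #VMA_ALLOCATION_CREATE_MAPPED_BIT, assuming an existing `allocator`:
-
-    \code
-    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-    bufCreateInfo.size = 65536;
-    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-    VmaAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
-        VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-    VkBuffer buf;
-    VmaAllocation alloc;
-    VmaAllocationInfo allocInfo;
-    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-    // After a GPU-to-CPU transfer has completed, allocInfo.pMappedData can be
-    // read directly, in any order.
-    \endcode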
-    */
-    VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
-    /**
-    Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
-    it says that despite the request for host access, a not-`HOST_VISIBLE` memory type can be selected
-    if it may improve performance.
-
-    By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
-    (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
-    issue an explicit transfer to write/read your data.
-    To prepare for this possibility, don't forget to add appropriate flags like
-    `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of the created buffer or image.
-    */
-    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
-    /** Allocation strategy that chooses the smallest possible free range for the allocation
-    to minimize memory usage and fragmentation, possibly at the expense of allocation time.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
-    /** Allocation strategy that chooses the first suitable free range for the allocation -
-    not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
-    to minimize allocation time, possibly at the expense of allocation quality.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
-    /** Allocation strategy that always chooses the lowest offset in available space.
-    This is not the most efficient strategy but achieves highly packed data.
-    Used internally by defragmentation, not recommended in typical usage.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
-    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-    /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-    /** A bit mask to extract only `STRATEGY` bits from the entire set of flags.
-    */
-    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
-        VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
-        VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
-        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-
-    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaAllocationCreateFlagBits;
-/// See #VmaAllocationCreateFlagBits.
-typedef VkFlags VmaAllocationCreateFlags;
-
-/// Flags to be passed as VmaPoolCreateInfo::flags.
-typedef enum VmaPoolCreateFlagBits
-{
-    /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
-
-    This is an optional optimization flag.
-
-    If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
-    vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
-    knows the exact type of your allocations so it can handle Buffer-Image Granularity
-    in the optimal way.
-
-    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
-    the exact type of such allocations is not known, so the allocator must be conservative
-    in handling Buffer-Image Granularity, which can lead to suboptimal allocation
-    (wasted memory).
In that case, if you can make sure you always allocate only
-    buffers and linear images or only optimal images out of this pool, use this flag
-    to make the allocator disregard Buffer-Image Granularity and so make allocations
-    faster and more optimal.
-    */
-    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
-
-    /** \brief Enables alternative, linear allocation algorithm in this pool.
-
-    Specify this flag to enable the linear allocation algorithm, which always creates
-    new allocations after the last one and doesn't reuse space from allocations freed in
-    between. It trades memory consumption for a simplified algorithm and data
-    structure, which has better performance and uses less memory for metadata.
-
-    By using this flag, you can achieve the behavior of a free-at-once, stack,
-    ring buffer, or double stack.
-    For details, see documentation chapter \ref linear_algorithm.
-    */
-    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
-
-    /** Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
-    */
-    VMA_POOL_CREATE_ALGORITHM_MASK =
-        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
-
-    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaPoolCreateFlagBits;
-/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
-typedef VkFlags VmaPoolCreateFlags;
-
-/// Flags to be passed as VmaDefragmentationInfo::flags.
-typedef enum VmaDefragmentationFlagBits
-{
-    /** \brief Use a simple but fast algorithm for defragmentation.
-    May not achieve best results but will require the least time to compute and the fewest allocations to copy.
-    */
-    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
-    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
-    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
-    */
-    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
-    /** \brief Perform full defragmentation of memory.
-    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
-    */
-    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
-    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
-    Only available when bufferImageGranularity is greater than 1, since it aims to reduce
-    alignment issues between different types of resources.
-    Otherwise falls back to the same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
-    */
-    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
-
-    /// A bit mask to extract only `ALGORITHM` bits from the entire set of flags.
-    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
-        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
-        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
-        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
-        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
-
-    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaDefragmentationFlagBits;
-/// See #VmaDefragmentationFlagBits.
-typedef VkFlags VmaDefragmentationFlags;
-
-/// Operation performed on a single defragmentation move. See structure #VmaDefragmentationMove.
-typedef enum VmaDefragmentationMoveOperation
-{
-    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
-    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
-    /// Set this value if you cannot move the allocation.
New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
-    VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
-    /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
-    VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
-} VmaDefragmentationMoveOperation;
-
-/** @} */
-
-/**
-\addtogroup group_virtual
-@{
-*/
-
-/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
-typedef enum VmaVirtualBlockCreateFlagBits
-{
-    /** \brief Enables alternative, linear allocation algorithm in this virtual block.
-
-    Specify this flag to enable the linear allocation algorithm, which always creates
-    new allocations after the last one and doesn't reuse space from allocations freed in
-    between. It trades memory consumption for a simplified algorithm and data
-    structure, which has better performance and uses less memory for metadata.
-
-    By using this flag, you can achieve the behavior of a free-at-once, stack,
-    ring buffer, or double stack.
-    For details, see documentation chapter \ref linear_algorithm.
-    */
-    VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
-
-    /** \brief Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
-    */
-    VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
-        VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
-
-    VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaVirtualBlockCreateFlagBits;
-/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
-typedef VkFlags VmaVirtualBlockCreateFlags;
-
-/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
-typedef enum VmaVirtualAllocationCreateFlagBits
-{
-    /** \brief Allocation will be created from upper stack in a double stack pool.
-
-    This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
-    */
-    VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
-    /** \brief Allocation strategy that tries to minimize memory usage.
-    */
-    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-    /** \brief Allocation strategy that tries to minimize allocation time.
-    */
-    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-    /** Allocation strategy that always chooses the lowest offset in available space.
-    This is not the most efficient strategy but achieves highly packed data.
-    */
-    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-    /** \brief A bit mask to extract only `STRATEGY` bits from the entire set of flags.
-
-    These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
-    */
-    VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
-
-    VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-} VmaVirtualAllocationCreateFlagBits;
-/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
-typedef VkFlags VmaVirtualAllocationCreateFlags;
-
-/** @} */
-
-#endif // _VMA_ENUM_DECLARATIONS
-
-#ifndef _VMA_DATA_TYPES_DECLARATIONS
-
-/**
-\addtogroup group_init
-@{ */
-
-/** \struct VmaAllocator
-\brief Represents the main object of this library.
-
-Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
-Call function vmaDestroyAllocator() to destroy it.
-
-It is recommended to create just one object of this type per `VkDevice` object,
-right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
-*/
-VK_DEFINE_HANDLE(VmaAllocator)
-
-/** @} */
-
-/**
-\addtogroup group_alloc
-@{
-*/
-
-/** \struct VmaPool
-\brief Represents a custom memory pool
-
-Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
-Call function vmaDestroyPool() to destroy it.
-
-For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
-*/
-VK_DEFINE_HANDLE(VmaPool)
-
-/** \struct VmaAllocation
-\brief Represents a single memory allocation.
-
-It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
-plus a unique offset.
-
-There are multiple ways to create such an object.
-You need to fill structure VmaAllocationCreateInfo.
-For more information see [Choosing memory type](@ref choosing_memory_type).
-
-Although the library provides convenience functions that create a Vulkan buffer or image,
-allocate memory for it, and bind them together,
-binding of the allocation to a buffer or an image is out of scope of the allocation itself.
-An allocation object can exist without a buffer/image bound to it; the binding can be done manually by the user,
-and the buffer/image can be destroyed independently of the allocation.
-
-The object also remembers its size and some other information.
-To retrieve this information, use function vmaGetAllocationInfo() and inspect
-the returned structure VmaAllocationInfo.
-*/
-VK_DEFINE_HANDLE(VmaAllocation)
-
-/** \struct VmaDefragmentationContext
-\brief An opaque object that represents a started defragmentation process.
-
-Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
-Call function vmaEndDefragmentation() to destroy it.
-*/
-VK_DEFINE_HANDLE(VmaDefragmentationContext)
-
-/** @} */
-
-/**
-\addtogroup group_virtual
-@{
-*/
-
-/** \struct VmaVirtualAllocation
-\brief Represents a single memory allocation done inside VmaVirtualBlock.
-
-Use it as a unique identifier of a virtual allocation within a single block.
-
-Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
-*/
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation);
-
-/** @} */
-
-/**
-\addtogroup group_virtual
-@{
-*/
-
-/** \struct VmaVirtualBlock
-\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
-
-Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
-For more information, see documentation chapter \ref virtual_allocator.
-
-This object is not thread-safe - it should not be used from multiple threads simultaneously and must be synchronized externally.
-*/
-VK_DEFINE_HANDLE(VmaVirtualBlock)
-
-/** @} */
-
-/**
-\addtogroup group_init
-@{
-*/
-
-/// Callback function called after successful vkAllocateMemory.
-typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
-    VmaAllocator VMA_NOT_NULL allocator,
-    uint32_t memoryType,
-    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
-    VkDeviceSize size,
-    void* VMA_NULLABLE pUserData);
-
-/// Callback function called before vkFreeMemory.
-typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryType, - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, - VkDeviceSize size, - void* VMA_NULLABLE pUserData); - -/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. - -Provided for informative purpose, e.g. to gather statistics about number of -allocations or total amount of memory allocated in Vulkan. - -Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. -*/ -typedef struct VmaDeviceMemoryCallbacks -{ - /// Optional, can be null. - PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; - /// Optional, can be null. - PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; - /// Optional, can be null. - void* VMA_NULLABLE pUserData; -} VmaDeviceMemoryCallbacks; - -/** \brief Pointers to some Vulkan functions - a subset used by the library. - -Used in VmaAllocatorCreateInfo::pVulkanFunctions. -*/ -typedef struct VmaVulkanFunctions -{ - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; - PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; - PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; - PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; - PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; - PFN_vkMapMemory VMA_NULLABLE vkMapMemory; - PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; - PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; - PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; - PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; - PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; - PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; - PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; - PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; - PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; - PFN_vkCreateImage VMA_NULLABLE vkCreateImage; - PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; - PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; - /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; -#endif -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. - PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; - /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
-    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
-#endif
-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
-#endif
-#if VMA_VULKAN_VERSION >= 1003000
-    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-    PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
-    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-    PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
-#endif
-} VmaVulkanFunctions;
-
-/// Description of an Allocator to be created.
-typedef struct VmaAllocatorCreateInfo
-{
-    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
-    VmaAllocatorCreateFlags flags;
-    /// Vulkan physical device.
-    /** It must be valid throughout the whole lifetime of the created allocator. */
-    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-    /// Vulkan device.
-    /** It must be valid throughout the whole lifetime of the created allocator. */
-    VkDevice VMA_NOT_NULL device;
-    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
-    /** Set to 0 to use default, which is currently 256 MiB. */
-    VkDeviceSize preferredLargeHeapBlockSize;
-    /// Custom CPU memory allocation callbacks. Optional.
-    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
-    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
-    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
-    /** Optional, can be null. */
-    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
-    /** \brief Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
-
-    If not NULL, it must be a pointer to an array of
-    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining the limit on the
-    maximum number of bytes that can be allocated out of a particular Vulkan memory
-    heap.
-
-    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
-    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
-
-    If there is a limit defined for a heap:
-
-    - If the user tries to allocate more memory from that heap using this allocator,
-      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-    - If the limit is smaller than the heap size reported in `VkMemoryHeap::size`, the
-      value of this limit will be reported instead when using vmaGetMemoryProperties().
-
-    Warning! Using this feature may not be equivalent to installing a GPU with a
-    smaller amount of memory, because the graphics driver doesn't necessarily fail new
-    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
-    exceeded. It may return success and just silently migrate some device memory
-    blocks to system RAM. This driver behavior can also be controlled using the
-    VK_AMD_memory_overallocation_behavior extension.
-    */
-    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
-
-    /** \brief Pointers to Vulkan functions. Can be null.
-
-    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
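-
-    A minimal sketch of filling this member, assuming the library was compiled
-    with `VMA_DYNAMIC_VULKAN_FUNCTIONS` enabled and `instance`, `physicalDevice`
-    and `device` already exist:
-
-    \code
-    VmaVulkanFunctions vulkanFunctions = {};
-    vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
-    vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
-
-    VmaAllocatorCreateInfo allocatorCreateInfo = {};
-    allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
-    allocatorCreateInfo.physicalDevice = physicalDevice;
-    allocatorCreateInfo.device = device;
-    allocatorCreateInfo.instance = instance;
-    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
-
-    VmaAllocator allocator;
-    vmaCreateAllocator(&allocatorCreateInfo, &allocator);
-    \endcode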
-    */
-    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
-    /** \brief Handle to Vulkan instance object.
-
-    Starting from version 3.0.0 this member is no longer optional, it must be set!
-    */
-    VkInstance VMA_NOT_NULL instance;
-    /** \brief Optional. The highest version of Vulkan that the application is designed to use.
-
-    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
-    The patch version number specified is ignored. Only the major and minor versions are considered.
-    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
-    Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
-    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
-    */
-    uint32_t vulkanApiVersion;
-#if VMA_EXTERNAL_MEMORY
-    /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
-
-    If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
-    elements, defining external memory handle types of particular Vulkan memory type,
-    to be passed using `VkExportMemoryAllocateInfoKHR`.
-
-    Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
-    This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
-    */
-    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
-#endif // #if VMA_EXTERNAL_MEMORY
-} VmaAllocatorCreateInfo;
-
-/// Information about existing #VmaAllocator object.
-typedef struct VmaAllocatorInfo
-{
-    /** \brief Handle to Vulkan instance object.
-
-    This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
-    */
-    VkInstance VMA_NOT_NULL instance;
-    /** \brief Handle to Vulkan physical device object.
-
-    This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
-    */
-    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-    /** \brief Handle to Vulkan device object.
-
-    This is the same value as has been passed through VmaAllocatorCreateInfo::device.
-    */
-    VkDevice VMA_NOT_NULL device;
-} VmaAllocatorInfo;
-
-/** @} */
-
-/**
-\addtogroup group_stats
-@{
-*/
-
-/** \brief Calculated statistics of memory usage, e.g. in a specific memory type, heap, custom pool, or total.
-
-These are fast to calculate.
-See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
-*/
-typedef struct VmaStatistics
-{
-    /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
-    */
-    uint32_t blockCount;
-    /** \brief Number of #VmaAllocation objects allocated.
-
-    Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
-    */
-    uint32_t allocationCount;
-    /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
-
-    \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
-    (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
-    "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
-    */
-    VkDeviceSize blockBytes;
-    /** \brief Total number of bytes occupied by all #VmaAllocation objects.
-
-    Always less than or equal to `blockBytes`.
- Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan - but unused by any #VmaAllocation. - */ - VkDeviceSize allocationBytes; -} VmaStatistics; - -/** \brief More detailed statistics than #VmaStatistics. - -These are slower to calculate. Use for debugging purposes. -See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). - -Previous version of the statistics API provided averages, but they have been removed -because they can be easily calculated as: - -\code -VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; -VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; -VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; -\endcode -*/ -typedef struct VmaDetailedStatistics -{ - /// Basic statistics. - VmaStatistics statistics; - /// Number of free ranges of memory between allocations. - uint32_t unusedRangeCount; - /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. - VkDeviceSize allocationSizeMin; - /// Largest allocation size. 0 if there are 0 allocations. - VkDeviceSize allocationSizeMax; - /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMin; - /// Largest empty range size. 0 if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMax; -} VmaDetailedStatistics; - -/** \brief General statistics from current state of the Allocator - -total memory usage across all memory heaps and types. - -These are slower to calculate. Use for debugging purposes. -See function vmaCalculateStatistics(). -*/ -typedef struct VmaTotalStatistics -{ - VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; - VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; - VmaDetailedStatistics total; -} VmaTotalStatistics; - -/** \brief Statistics of current memory usage and available budget for a specific memory heap. - -These are fast to calculate. -See function vmaGetHeapBudgets(). -*/ -typedef struct VmaBudget -{ - /** \brief Statistics fetched from the library. - */ - VmaStatistics statistics; - /** \brief Estimated current memory usage of the program, in bytes. - - Fetched from system using VK_EXT_memory_budget extension if enabled. - - It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects - also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or - `VkDeviceMemory` blocks allocated outside of this library, if any. - */ - VkDeviceSize usage; - /** \brief Estimated amount of memory available to the program, in bytes. - - Fetched from system using VK_EXT_memory_budget extension if enabled. - - It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors - external to the program, decided by the operating system. - Difference `budget - usage` is the amount of additional memory that can probably - be allocated without problems. Exceeding the budget may result in various problems. - */ - VkDeviceSize budget; -} VmaBudget; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** \brief Parameters of new #VmaAllocation. - -To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. -*/ -typedef struct VmaAllocationCreateInfo -{ - /// Use #VmaAllocationCreateFlagBits enum. - VmaAllocationCreateFlags flags; - /** \brief Intended usage of memory. 
-
-    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
-    If `pool` is not null, this member is ignored.
-    */
-    VmaMemoryUsage usage;
-    /** \brief Flags that must be set in a Memory Type chosen for an allocation.
-
-    Leave 0 if you specify memory requirements in another way. \n
-    If `pool` is not null, this member is ignored.*/
-    VkMemoryPropertyFlags requiredFlags;
-    /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
-
-    Set to 0 if no additional flags are preferred. \n
-    If `pool` is not null, this member is ignored. */
-    VkMemoryPropertyFlags preferredFlags;
-    /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
-
-    Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
-    it meets other requirements specified by this structure, with no further
-    restrictions on memory type index. \n
-    If `pool` is not null, this member is ignored.
-    */
-    uint32_t memoryTypeBits;
-    /** \brief Pool that this allocation should be created in.
-
-    Leave `VK_NULL_HANDLE` to allocate from the default pool. If not null, members:
-    `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
-    */
-    VmaPool VMA_NULLABLE pool;
-    /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
-
-    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
-    null or a pointer to a null-terminated string. The string will then be copied to an
-    internal buffer, so it doesn't need to be valid after the allocation call.
-    */
-    void* VMA_NULLABLE pUserData;
-    /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
-
-    It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
-    and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-    Otherwise, it has the priority of the memory block where it is placed and this variable is ignored.
-    */
-    float priority;
-} VmaAllocationCreateInfo;
-
-/// Describes parameters of a created #VmaPool.
-typedef struct VmaPoolCreateInfo
-{
-    /** \brief Vulkan memory type index to allocate this pool from.
-    */
-    uint32_t memoryTypeIndex;
-    /** \brief Use combination of #VmaPoolCreateFlagBits.
-    */
-    VmaPoolCreateFlags flags;
-    /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
-
-    Specify nonzero to set explicit, constant size of memory blocks used by this
-    pool.
-
-    Leave 0 to use default and let the library manage block sizes automatically.
-    Sizes of particular blocks may vary.
-    In this case, the pool will also support dedicated allocations.
-    */
-    VkDeviceSize blockSize;
-    /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
-
-    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
-    */
-    size_t minBlockCount;
-    /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
-
-    Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
-
-    Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
-    throughout the whole lifetime of this pool.
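-
-    A minimal sketch of creating a custom pool with this structure, assuming an
-    existing `allocator` (the example buffer parameters are placeholders used
-    only to pick the memory type index):
-
-    \code
-    VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-    exampleBufCreateInfo.size = 1024; // Doesn't matter here.
-    exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
-
-    VmaAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-    uint32_t memTypeIndex;
-    vmaFindMemoryTypeIndexForBufferInfo(allocator,
-        &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
-
-    VmaPoolCreateInfo poolCreateInfo = {};
-    poolCreateInfo.memoryTypeIndex = memTypeIndex;
-
-    VmaPool pool;
-    vmaCreatePool(allocator, &poolCreateInfo, &pool);
-    \endcode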
- */ - size_t maxBlockCount; - /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. - - It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. - Otherwise, this variable is ignored. - */ - float priority; - /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. - - Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. - It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, - e.g. when doing interop with OpenGL. - */ - VkDeviceSize minAllocationAlignment; - /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. - - Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. - It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. - Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. - - Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, - can be attached automatically by this library when using other, more convenient of its features. - */ - void* VMA_NULLABLE pMemoryAllocateNext; -} VmaPoolCreateInfo; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). -typedef struct VmaAllocationInfo -{ - /** \brief Memory type index that this allocation was allocated from. - - It never changes. - */ - uint32_t memoryType; - /** \brief Handle to Vulkan memory object. - - Same memory object can be shared by multiple allocations. - - It can change after the allocation is moved during \ref defragmentation. - */ - VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; - /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. - - You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function - vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, - not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation - and apply this offset automatically. - - It can change after the allocation is moved during \ref defragmentation. - */ - VkDeviceSize offset; - /** \brief Size of this allocation, in bytes. - - It never changes. - - \note Allocation size returned in this variable may be greater than the size - requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the - allocation is accessible for operations on memory e.g. using a pointer after - mapping with vmaMapMemory(), but operations on the resource e.g. using - `vkCmdCopyBuffer` must be limited to the size of the resource. - */ - VkDeviceSize size; - /** \brief Pointer to the beginning of this allocation as mapped data. - - If the allocation hasn't been mapped using vmaMapMemory() and hasn't been - created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. 
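-
-    A minimal sketch of writing through this pointer with a persistently mapped
-    allocation; `bufCreateInfo`, `myData` and `myDataSize` are placeholders:
-
-    \code
-    VmaAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-        VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-    VkBuffer buf;
-    VmaAllocation alloc;
-    VmaAllocationInfo allocInfo;
-    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-    memcpy(allocInfo.pMappedData, myData, myDataSize); // No vmaMapMemory() needed.
-    \endcode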
-
-    It can change after a call to vmaMapMemory(), vmaUnmapMemory().
-    It can also change after the allocation is moved during \ref defragmentation.
-    */
-    void* VMA_NULLABLE pMappedData;
-    /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
-
-    It can change after a call to vmaSetAllocationUserData() for this allocation.
-    */
-    void* VMA_NULLABLE pUserData;
-    /** \brief Custom allocation name that was set with vmaSetAllocationName().
-
-    It can change after a call to vmaSetAllocationName() for this allocation.
-
-    Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
-    additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
-    */
-    const char* VMA_NULLABLE pName;
-} VmaAllocationInfo;
-
-/** \brief Parameters for defragmentation.
-
-To be used with function vmaBeginDefragmentation().
-*/
-typedef struct VmaDefragmentationInfo
-{
-    /// \brief Use combination of #VmaDefragmentationFlagBits.
-    VmaDefragmentationFlags flags;
-    /** \brief Custom pool to be defragmented.
-
-    If null, default pools will undergo the defragmentation process.
-    */
-    VmaPool VMA_NULLABLE pool;
-    /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
-
-    `0` means no limit.
-    */
-    VkDeviceSize maxBytesPerPass;
-    /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
-
-    `0` means no limit.
-    */
-    uint32_t maxAllocationsPerPass;
-} VmaDefragmentationInfo;
-
-/// Single move of an allocation to be done for defragmentation.
-typedef struct VmaDefragmentationMove
-{
-    /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
-    VmaDefragmentationMoveOperation operation;
-    /// Allocation that should be moved.
-    VmaAllocation VMA_NOT_NULL srcAllocation;
-    /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
-
-    \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
-    to be used for binding a new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
-    vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
-    */
-    VmaAllocation VMA_NOT_NULL dstTmpAllocation;
-} VmaDefragmentationMove;
-
-/** \brief Parameters for incremental defragmentation steps.
-
-To be used with function vmaBeginDefragmentationPass().
-*/
-typedef struct VmaDefragmentationPassMoveInfo
-{
-    /// Number of elements in the `pMoves` array.
-    uint32_t moveCount;
-    /** \brief Array of moves to be performed by the user in the current defragmentation pass.
-
-    Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
-
-    For each element, you should:
-
-    1. Create a new buffer/image in the place pointed to by VmaDefragmentationMove::dstTmpAllocation.
-    2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
-    3. Make sure these commands finished executing on the GPU.
-    4. Destroy the old buffer/image.
-
-    Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
-    After this call, the allocation will point to the new place in memory.
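-
-    A condensed sketch of a whole defragmentation run (command recording,
-    submission and synchronization are elided; `myPool` is a placeholder):
-
-    \code
-    VmaDefragmentationInfo defragInfo = {};
-    defragInfo.pool = myPool; // Null would defragment default pools instead.
-
-    VmaDefragmentationContext defragCtx;
-    vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
-
-    for(;;)
-    {
-        VmaDefragmentationPassMoveInfo pass;
-        VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
-        if(res == VK_SUCCESS)
-            break; // Nothing left to move.
-        // For each pass.pMoves[i]: create the new buffer/image at
-        // dstTmpAllocation, copy the data on the GPU, wait, destroy the old one.
-        vmaEndDefragmentationPass(allocator, defragCtx, &pass);
-    }
-
-    vmaEndDefragmentation(allocator, defragCtx, NULL);
-    \endcode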
- - Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. - - Alternatively, if you decide you want to completely remove the allocation: - - 1. Destroy its buffer/image. - 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. - - Then, after vmaEndDefragmentationPass() the allocation will be freed. - */ - VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; -} VmaDefragmentationPassMoveInfo; - -/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). -typedef struct VmaDefragmentationStats -{ - /// Total number of bytes that have been copied while moving allocations to different places. - VkDeviceSize bytesMoved; - /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. - VkDeviceSize bytesFreed; - /// Number of allocations that have been moved to different places. - uint32_t allocationsMoved; - /// Number of empty `VkDeviceMemory` objects that have been released to the system. - uint32_t deviceMemoryBlocksFreed; -} VmaDefragmentationStats; - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). -typedef struct VmaVirtualBlockCreateInfo -{ - /** \brief Total size of the virtual block. - - Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. - For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. - */ - VkDeviceSize size; - - /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. - */ - VmaVirtualBlockCreateFlags flags; - - /** \brief Custom CPU memory allocation callbacks. Optional. - - Optional, can be null. When specified, they will be used for all CPU-side memory allocations. - */ - const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; -} VmaVirtualBlockCreateInfo; - -/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). -typedef struct VmaVirtualAllocationCreateInfo -{ - /** \brief Size of the allocation. - - Cannot be zero. - */ - VkDeviceSize size; - /** \brief Required alignment of the allocation. Optional. - - Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. - */ - VkDeviceSize alignment; - /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. - */ - VmaVirtualAllocationCreateFlags flags; - /** \brief Custom pointer to be associated with the allocation. Optional. - - It can be any value and can be used for user-defined purposes. It can be fetched or changed later. - */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationCreateInfo; - -/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). -typedef struct VmaVirtualAllocationInfo -{ - /** \brief Offset of the allocation. - - Offset at which the allocation was made. - */ - VkDeviceSize offset; - /** \brief Size of the allocation. - - Same value as passed in VmaVirtualAllocationCreateInfo::size. - */ - VkDeviceSize size; - /** \brief Custom pointer associated with the allocation. - - Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). 
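-
-    A minimal sketch of the virtual allocator in use:
-
-    \code
-    VmaVirtualBlockCreateInfo blockCreateInfo = {};
-    blockCreateInfo.size = 1048576; // 1 MiB, in whatever units you chose.
-
-    VmaVirtualBlock block;
-    vmaCreateVirtualBlock(&blockCreateInfo, &block);
-
-    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.size = 4096;
-
-    VmaVirtualAllocation alloc;
-    VkDeviceSize offset;
-    vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
-    // Use [offset, offset + 4096) of your own resource, then:
-    vmaVirtualFree(block, alloc);
-    vmaDestroyVirtualBlock(block);
-    \endcode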
- */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationInfo; - -/** @} */ - -#endif // _VMA_DATA_TYPES_DECLARATIONS - -#ifndef _VMA_FUNCTION_HEADERS - -/** -\addtogroup group_init -@{ -*/ - -/// Creates #VmaAllocator object. -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); - -/// Destroys allocator object. -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( - VmaAllocator VMA_NULLABLE allocator); - -/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. - -It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to -`VkPhysicalDevice`, `VkDevice` etc. every time using this function. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); - -/** -PhysicalDeviceProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); - -/** -PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); - -/** -\brief Given Memory Type Index, returns Property Flags of this memory type. - -This is just a convenience function. Same information can be obtained using -vmaGetMemoryProperties(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); - -/** \brief Sets index of the current frame. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t frameIndex); - -/** @} */ - -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Retrieves statistics from current state of the Allocator. - -This function is called "calculate" not "get" because it has to traverse all -internal data structures, so it may be quite slow. Use it for debugging purposes. -For faster but more brief statistics suitable to be called every frame or every allocation, -use vmaGetHeapBudgets(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaTotalStatistics* VMA_NOT_NULL pStats); - -/** \brief Retrieves information about current memory usage and budget for all memory heaps. - -\param allocator -\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. - -This function is called "get" not "calculate" because it is very fast, suitable to be called -every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. 
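-
-A minimal sketch, assuming an existing `allocator` (a real application would
-size the array from vmaGetMemoryProperties() rather than `VK_MAX_MEMORY_HEAPS`):
-
-\code
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-vmaGetHeapBudgets(allocator, budgets);
-// budgets[heapIndex].usage can now be compared against
-// budgets[heapIndex].budget before making large allocations.
-\endcode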
-*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( - VmaAllocator VMA_NOT_NULL allocator, - VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** -\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. - -This algorithm tries to find a memory type that: - -- Is allowed by memoryTypeBits. -- Contains all the flags from pAllocationCreateInfo->requiredFlags. -- Matches intended usage. -- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. - -\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result -from this function or any other allocating function probably means that your -device doesn't support any memory type with requested features for the specific -type of resource you want to use it for. Please check parameters of your -resource, like image layout (OPTIMAL versus LINEAR) or mip level count. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** -\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. - -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy buffer that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** -\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. - -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy image that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** \brief Allocates Vulkan device memory and creates #VmaPool object. - -\param allocator Allocator object. -\param pCreateInfo Parameters of pool to create. -\param[out] pPool Handle to created pool. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( - VmaAllocator VMA_NOT_NULL allocator, - const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); - -/** \brief Destroys #VmaPool object and frees Vulkan device memory. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NULLABLE pool); - -/** @} */ - -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Retrieves statistics of existing #VmaPool object. - -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaStatistics* VMA_NOT_NULL pPoolStats); - -/** \brief Retrieves detailed statistics of existing #VmaPool object. - -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. 
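-
-A minimal sketch, assuming `pool` was created earlier with vmaCreatePool():
-
-\code
-VmaDetailedStatistics poolStats;
-vmaCalculatePoolStatistics(allocator, pool, &poolStats);
-// E.g. poolStats.statistics.allocationBytes or poolStats.unusedRangeCount
-// can now be inspected for debugging.
-\endcode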
-*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. - -Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, -`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is -`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). - -Possible return values: - -- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. -- `VK_SUCCESS` - corruption detection has been performed and succeeded. -- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. - `VMA_ASSERT` is also fired in that case. -- Other value: Error returned by Vulkan, e.g. memory mapping failure. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool); - -/** \brief Retrieves name of a custom pool. - -After the call `ppName` is either null or points to an internally-owned null-terminated string -containing name of the pool that was previously set. The pointer becomes invalid when the pool is -destroyed or its name is changed using vmaSetPoolName(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE* VMA_NOT_NULL ppName); - -/** \brief Sets name of a custom pool. - -`pName` can be either null or pointer to a null-terminated string with new name for the pool. -Function makes internal copy of the string, so it can be changed or freed immediately after this call. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE pName); - -/** \brief General purpose memory allocation. - -\param allocator -\param pVkMemoryRequirements -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). - -It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), -vmaCreateBuffer(), vmaCreateImage() instead whenever possible. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief General purpose memory allocation for multiple allocation objects at once. - -\param allocator Allocator object. -\param pVkMemoryRequirements Memory requirements for each allocation. -\param pCreateInfo Creation parameters for each allocation. -\param allocationCount Number of allocations to make. -\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. -\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. 
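-
-A minimal sketch (illustrative; the requirements and create infos are assumptions, fill each entry for your real resources):
-
-\code
-VkMemoryRequirements memReqs[8];             // Fill each entry, e.g. from vkGetBufferMemoryRequirements().
-VmaAllocationCreateInfo createInfos[8] = {};
-for(size_t i = 0; i < 8; ++i)
-    createInfos[i].requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-VmaAllocation allocs[8] = {};
-VkResult res = vmaAllocateMemoryPages(allocator, memReqs, createInfos, 8, allocs, nullptr);
-// When no longer needed: vmaFreeMemoryPages(allocator, 8, allocs);
-\endcode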
-
-You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
-
-Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
-It is just a general purpose allocation function able to make multiple allocations at once.
-It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
-
-All allocations are made using same parameters. All of them are created out of the same memory pool and type.
-If any allocation fails, all allocations already made within this function call are also freed, so that when
-returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
-    size_t allocationCount,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
-    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
-
-/** \brief Allocates memory suitable for given `VkBuffer`.
-
-\param allocator
-\param buffer
-\param pCreateInfo
-\param[out] pAllocation Handle to allocated memory.
-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-
-It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
-
-This is a special-purpose function. In most cases you should use vmaCreateBuffer().
-
-You must free the allocation using vmaFreeMemory() when no longer needed.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-
-/** \brief Allocates memory suitable for given `VkImage`.
-
-\param allocator
-\param image
-\param pCreateInfo
-\param[out] pAllocation Handle to allocated memory.
-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-
-It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
-
-This is a special-purpose function. In most cases you should use vmaCreateImage().
-
-You must free the allocation using vmaFreeMemory() when no longer needed.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-
-/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-
-Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VmaAllocation VMA_NULLABLE allocation);
-
-/** \brief Frees memory and destroys multiple allocations.
-
-Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
-It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
-vmaAllocateMemoryPages() and other functions.
-It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
-
-Allocations in `pAllocations` array can come from any memory pools and types.
-Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-    VmaAllocator VMA_NOT_NULL allocator,
-    size_t allocationCount,
-    const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
-
-/** \brief Returns current information about specified allocation.
-
-Current parameters of given allocation are returned in `pAllocationInfo`.
-
-This function doesn't lock any mutex, so it should be quite efficient.
-Still, you should avoid calling it too often.
-You can retrieve same VmaAllocationInfo structure while creating your resource, from function
-vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
-(e.g. due to defragmentation).
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
-
-/** \brief Sets pUserData in given allocation to new value.
-
-The value of pointer `pUserData` is copied to allocation's `pUserData`.
-It is opaque, so you can use it however you want - e.g.
-as a pointer, ordinal number or some handle to your own data.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    void* VMA_NULLABLE pUserData);
-
-/** \brief Sets pName in given allocation to new value.
-
-`pName` must be either null, or pointer to a null-terminated string. The function
-makes local copy of the string and sets it as allocation's `pName`. String
-passed as pName doesn't need to be valid for whole lifetime of the allocation -
-you can free it after this call. String previously pointed by allocation's
-`pName` is freed from memory.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    const char* VMA_NULLABLE pName);
-
-/**
-\brief Given an allocation, returns Property Flags of its memory type.
-
-This is just a convenience function. Same information can be obtained using
-vmaGetAllocationInfo() + vmaGetMemoryProperties().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
-
-/** \brief Maps memory represented by given allocation and returns pointer to it.
-
-Maps memory represented by given allocation to make it accessible to CPU code.
-When succeeded, `*ppData` contains pointer to first byte of this memory.
-
-\warning
-If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
-correctly offset to the beginning of region assigned to this particular allocation.
-Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
-You should not add VmaAllocationInfo::offset to it!
-
-Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
-function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
-multiple times simultaneously, it is safe to call this function on allocations
-assigned to the same memory block. Actual Vulkan memory will be mapped on first
-mapping and unmapped on last unmapping.
-
-If the function succeeded, you must call vmaUnmapMemory() to unmap the
-allocation when mapping is no longer needed or before freeing the allocation, at
-the latest.
-
-It is also safe to call this function multiple times on the same allocation. You
-must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
-
-It is also safe to call this function on allocation created with
-#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
-You must still call vmaUnmapMemory() same number of times as you called
-vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
-"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
-
-This function fails when used on allocation made in memory type that is not
-`HOST_VISIBLE`.
-
-This function doesn't automatically flush or invalidate caches.
-If the allocation is made from a memory type that is not `HOST_COHERENT`,
-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    void* VMA_NULLABLE* VMA_NOT_NULL ppData);
-
-/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-
-For details, see description of vmaMapMemory().
-
-This function doesn't automatically flush or invalidate caches.
-If the allocation is made from a memory type that is not `HOST_COHERENT`,
-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation);
-
-/** \brief Flushes memory of given allocation.
-
-Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
-It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
-Unmap operation doesn't do that automatically.
-
-- `offset` must be relative to the beginning of allocation.
-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
-- `offset` and `size` don't have to be aligned.
-  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
-- If `size` is 0, this call is ignored.
-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-  this call is ignored.
-
-Warning! `offset` and `size` are relative to the contents of given `allocation`.
-If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-Do not pass allocation's offset as `offset`!!!
-
-This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
-called, otherwise `VK_SUCCESS`.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VkDeviceSize offset,
-    VkDeviceSize size);
-
-/** \brief Invalidates memory of given allocation.
-
-Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
-It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
-Map operation doesn't do that automatically.
-
-- `offset` must be relative to the beginning of allocation.
-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
-- `offset` and `size` don't have to be aligned.
-  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
-- If `size` is 0, this call is ignored.
-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-  this call is ignored.
-
-Warning! `offset` and `size` are relative to the contents of given `allocation`.
-If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-Do not pass allocation's offset as `offset`!!!
-
-This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
-it is called, otherwise `VK_SUCCESS`.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VkDeviceSize offset,
-    VkDeviceSize size);
-
-/** \brief Flushes memory of given set of allocations.
-
-Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
-For more information, see documentation of vmaFlushAllocation().
-
-\param allocator
-\param allocationCount
-\param allocations
-\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
-\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
-
-This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
-called, otherwise `VK_SUCCESS`.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-    VmaAllocator VMA_NOT_NULL allocator,
-    uint32_t allocationCount,
-    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
-
-/** \brief Invalidates memory of given set of allocations.
-
-Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
-For more information, see documentation of vmaInvalidateAllocation().
-
-\param allocator
-\param allocationCount
-\param allocations
-\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
-\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
-
-This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
-called, otherwise `VK_SUCCESS`.
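-
-A minimal sketch of the typical write pattern for a single mappable, non-`HOST_COHERENT` allocation
-(illustrative; `allocation` and `myData` are assumed to exist):
-
-\code
-void* pData;
-if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
-{
-    memcpy(pData, &myData, sizeof(myData));
-    // Required for non-HOST_COHERENT memory; 0 and VK_WHOLE_SIZE cover the whole allocation.
-    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
-    vmaUnmapMemory(allocator, allocation);
-}
-\endcode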
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-    VmaAllocator VMA_NOT_NULL allocator,
-    uint32_t allocationCount,
-    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
-
-/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
-
-\param allocator
-\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
-
-Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
-`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
-`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
-
-Possible return values:
-
-- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
-- `VK_SUCCESS` - corruption detection has been performed and succeeded.
-- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
-  `VMA_ASSERT` is also fired in that case.
-- Other value: Error returned by Vulkan, e.g. memory mapping failure.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
-    VmaAllocator VMA_NOT_NULL allocator,
-    uint32_t memoryTypeBits);
-
-/** \brief Begins defragmentation process.
-
-\param allocator Allocator object.
-\param pInfo Structure filled with parameters of defragmentation.
-\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
-\returns
-- `VK_SUCCESS` if defragmentation can begin.
-- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
-
-For more information about defragmentation, see documentation chapter:
-[Defragmentation](@ref defragmentation).
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
-    VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
-
-/** \brief Ends defragmentation process.
-
-\param allocator Allocator object.
-\param context Context object that has been created by vmaBeginDefragmentation().
-\param[out] pStats Optional stats for the defragmentation. Can be null.
-
-Use this function to finish defragmentation started by vmaBeginDefragmentation().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaDefragmentationContext VMA_NOT_NULL context,
-    VmaDefragmentationStats* VMA_NULLABLE pStats);
-
-/** \brief Starts single defragmentation pass.
-
-\param allocator Allocator object.
-\param context Context object that has been created by vmaBeginDefragmentation().
-\param[out] pPassInfo Computed information for the current pass.
-\returns
-- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
-- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
-  and then preferably try another pass with vmaBeginDefragmentationPass().
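-
-A minimal sketch of the full pass loop (illustrative; error handling and the actual copying of
-resource contents between old and new places are omitted):
-
-\code
-VmaDefragmentationInfo defragInfo = {};
-VmaDefragmentationContext defragCtx;
-if(vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
-{
-    VmaDefragmentationPassMoveInfo passInfo;
-    while(vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo) == VK_INCOMPLETE)
-    {
-        // Recreate resources at their new places and copy their contents here,
-        // then commit the moves.
-        if(vmaEndDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
-            break;
-    }
-    vmaEndDefragmentation(allocator, defragCtx, nullptr);
-}
-\endcode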
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaDefragmentationContext VMA_NOT_NULL context,
-    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
-
-/** \brief Ends single defragmentation pass.
-
-\param allocator Allocator object.
-\param context Context object that has been created by vmaBeginDefragmentation().
-\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.
-
-Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
-
-Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
-After this call:
-
-- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
-  (which is the default) will be pointing to the new destination place.
-- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
-  will be freed.
-
-If no more moves are possible you can end whole defragmentation.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaDefragmentationContext VMA_NOT_NULL context,
-    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
-
-/** \brief Binds buffer to allocation.
-
-Binds specified buffer to region of memory represented by specified allocation.
-Gets `VkDeviceMemory` handle and offset from the allocation.
-If you want to create a buffer, allocate memory for it and bind them together separately,
-you should use this function for binding instead of standard `vkBindBufferMemory()`,
-because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
-allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
-(which is illegal in Vulkan).
-
-It is recommended to use function vmaCreateBuffer() instead of this one.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
-
-/** \brief Binds buffer to allocation with additional parameters.
-
-\param allocator
-\param allocation
-\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
-\param buffer
-\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
-
-This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
-
-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    VkDeviceSize allocationLocalOffset,
-    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-    const void* VMA_NULLABLE pNext);
-
-/** \brief Binds image to allocation.
-
-Binds specified image to region of memory represented by specified allocation.
-Gets `VkDeviceMemory` handle and offset from the allocation.
-If you want to create an image, allocate memory for it and bind them together separately, -you should use this function for binding instead of standard `vkBindImageMemory()`, -because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple -allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously -(which is illegal in Vulkan). - -It is recommended to use function vmaCreateImage() instead of this one. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); - -/** \brief Binds image to allocation with additional parameters. - -\param allocator -\param allocation -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. -\param image -\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. - -This function is similar to vmaBindImageMemory(), but it provides additional parameters. - -If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag -or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, - const void* VMA_NULLABLE pNext); - -/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. - -\param allocator -\param pBufferCreateInfo -\param pAllocationCreateInfo -\param[out] pBuffer Buffer that was created. -\param[out] pAllocation Allocation that was created. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -This function automatically: - --# Creates buffer. --# Allocates appropriate memory for it. --# Binds the buffer with the memory. - -If any of these operations fail, buffer and allocation are not created, -returned value is negative error code, `*pBuffer` and `*pAllocation` are null. - -If the function succeeded, you must destroy both buffer and allocation when you -no longer need them using either convenience function vmaDestroyBuffer() or -separately, using `vkDestroyBuffer()` and vmaFreeMemory(). - -If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, -VK_KHR_dedicated_allocation extension is used internally to query driver whether -it requires or prefers the new buffer to have dedicated allocation. If yes, -and if dedicated allocation is possible -(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated -allocation for this buffer, just like when using -#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - -\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, -although recommended as a good practice, is out of scope of this library and could be implemented -by the user as a higher-level logic on top of VMA. 
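-
-A minimal sketch (illustrative; sizes and usage flags are arbitrary assumptions):
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-// Later: vmaDestroyBuffer(allocator, buf, alloc);
-\endcode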
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-
-/** \brief Creates a buffer with additional minimum alignment.
-
-Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows you to specify a custom
-minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
-for interop with OpenGL.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-    VkDeviceSize minAlignment,
-    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-
-/** \brief Creates a new `VkBuffer`, binds already created memory for it.
-
-\param allocator
-\param allocation Allocation that provides memory to be used for binding new buffer to it.
-\param pBufferCreateInfo
-\param[out] pBuffer Buffer that was created.
-
-This function automatically:
-
--# Creates buffer.
--# Binds the buffer with the supplied memory.
-
-If any of these operations fail, buffer is not created,
-returned value is negative error code and `*pBuffer` is null.
-
-If the function succeeded, you must destroy the buffer when you
-no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
-allocation you can use convenience function vmaDestroyBuffer().
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
-
-/** \brief Destroys Vulkan buffer and frees allocated memory.
-
-This is just a convenience function equivalent to:
-
-\code
-vkDestroyBuffer(device, buffer, allocationCallbacks);
-vmaFreeMemory(allocator, allocation);
-\endcode
-
-It is safe to pass null as buffer and/or allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
-    VmaAllocation VMA_NULLABLE allocation);
-
-/// Function similar to vmaCreateBuffer().
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-    VmaAllocator VMA_NOT_NULL allocator,
-    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
-    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-
-/// Function similar to vmaCreateAliasingBuffer().
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VmaAllocation VMA_NOT_NULL allocation,
-    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
-
-/** \brief Destroys Vulkan image and frees allocated memory.
-
-This is just a convenience function equivalent to:
-
-\code
-vkDestroyImage(device, image, allocationCallbacks);
-vmaFreeMemory(allocator, allocation);
-\endcode
-
-It is safe to pass null as image and/or allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-    VmaAllocator VMA_NOT_NULL allocator,
-    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
-    VmaAllocation VMA_NULLABLE allocation);
-
-/** @} */
-
-/**
-\addtogroup group_virtual
-@{
-*/
-
-/** \brief Creates new #VmaVirtualBlock object.
-
-\param pCreateInfo Parameters for creation.
-\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
-    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
-    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
-
-/** \brief Destroys #VmaVirtualBlock object.
-
-Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
-You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
-if you are sure this is what you want. If you do neither, an assert is called.
-
-If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
-don't forget to free them.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
-    VmaVirtualBlock VMA_NULLABLE virtualBlock);
-
-/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
-*/
-VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
-
-/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
-
-/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
-
-If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
-(even though the function doesn't ever allocate actual GPU memory).
-`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
-
-\param virtualBlock Virtual block
-\param pCreateInfo Parameters for the allocation
-\param[out] pAllocation Returned handle of the new allocation
-\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-    VkDeviceSize* VMA_NULLABLE pOffset);
-
-/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
-
-It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
-
-/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
-
-You must either call this function or free each virtual allocation individually with vmaVirtualFree()
-before destroying a virtual block. Otherwise, an assert is called.
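-
-A minimal lifetime sketch (illustrative; sizes are arbitrary). Here every allocation is freed
-individually, so vmaClearVirtualBlock() is not needed before destruction:
-
-\code
-VmaVirtualBlockCreateInfo blockCreateInfo = {};
-blockCreateInfo.size = 1048576; // 1 MB
-
-VmaVirtualBlock block;
-if(vmaCreateVirtualBlock(&blockCreateInfo, &block) == VK_SUCCESS)
-{
-    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-    allocCreateInfo.size = 4096;
-
-    VmaVirtualAllocation alloc;
-    VkDeviceSize offset;
-    if(vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
-    {
-        // Use the returned offset within your own memory or resource...
-        vmaVirtualFree(block, alloc);
-    }
-    vmaDestroyVirtualBlock(block);
-}
-\endcode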
-
-If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
-don't forget to free it as well.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
-
-/** \brief Changes custom pointer associated with given virtual allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
-    void* VMA_NULLABLE pUserData);
-
-/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-
-This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaStatistics* VMA_NOT_NULL pStats);
-
-/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-
-This function is slow to call. Use for debugging purposes.
-For less detailed statistics, see vmaGetVirtualBlockStatistics().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaDetailedStatistics* VMA_NOT_NULL pStats);
-
-/** @} */
-
-#if VMA_STATS_STRING_ENABLED
-/**
-\addtogroup group_stats
-@{
-*/
-
-/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
-\param virtualBlock Virtual block.
-\param[out] ppStatsString Returned string.
-\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
-
-Returned string must be freed using vmaFreeVirtualBlockStatsString().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-    VkBool32 detailedMap);
-
-/// Frees a string returned by vmaBuildVirtualBlockStatsString().
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    char* VMA_NULLABLE pStatsString);
-
-/** \brief Builds and returns statistics as a null-terminated string in JSON format.
-\param allocator
-\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
-\param detailedMap
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-    VmaAllocator VMA_NOT_NULL allocator,
-    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-    VkBool32 detailedMap);
-
-VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-    VmaAllocator VMA_NOT_NULL allocator,
-    char* VMA_NULLABLE pStatsString);
-
-/** @} */
-
-#endif // VMA_STATS_STRING_ENABLED
-
-#endif // _VMA_FUNCTION_HEADERS
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
-
-////////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////////
-//
-//    IMPLEMENTATION
-//
-////////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////////
-
-// For Visual Studio IntelliSense.
-#if defined(__cplusplus) && defined(__INTELLISENSE__)
-#define VMA_IMPLEMENTATION
-#endif
-
-#ifdef VMA_IMPLEMENTATION
-#undef VMA_IMPLEMENTATION
-
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-#include <utility>
-#include <type_traits>
-
-#ifdef _MSC_VER
-    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
-#endif
-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-    #include <bit> // For std::popcount
-#endif
-
-/*******************************************************************************
-CONFIGURATION SECTION
-
-Define some of these macros before each #include of this header or change them
-here if you need other than default behavior depending on your environment.
-*/
-#ifndef _VMA_CONFIGURATION
-
-/*
-Define this macro to 1 to make the library fetch pointers to Vulkan functions
-internally, like:
-
-    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
-*/
-#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-    #define VMA_STATIC_VULKAN_FUNCTIONS 1
-#endif
-
-/*
-Define this macro to 1 to make the library fetch pointers to Vulkan functions
-internally, like:
-
-    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
-
-To use this feature in new versions of VMA you now have to pass
-VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
-VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
-*/
-#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
-    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
-#endif
-
-#ifndef VMA_USE_STL_SHARED_MUTEX
-    // Compiler conforms to C++17.
-    #if __cplusplus >= 201703L
-        #define VMA_USE_STL_SHARED_MUTEX 1
-    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
-    // Otherwise it is always 199711L, even though shared_mutex works since Visual Studio 2015 Update 2.
-    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
-        #define VMA_USE_STL_SHARED_MUTEX 1
-    #else
-        #define VMA_USE_STL_SHARED_MUTEX 0
-    #endif
-#endif
-
-/*
-Define this macro to include custom header files without having to edit this file directly, e.g.:
-
-    // Inside of "my_vma_configuration_user_includes.h":
-
-    #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
-    #include "my_custom_min.h" // for my_custom_min
-    #include <algorithm>
-    #include <cassert>
-
-    // Inside a different file, which includes "vk_mem_alloc.h":
-
-    #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
-    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
-    #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
-    #include "vk_mem_alloc.h"
-    ...
-
-The following headers are used in this CONFIGURATION section only, so feel free to
-remove them if not needed.
-*/
-#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
-    #include <cassert> // for assert
-    #include <algorithm> // for min, max
-    #include <mutex>
-#else
-    #include VMA_CONFIGURATION_USER_INCLUDES_H
-#endif
-
-#ifndef VMA_NULL
-    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
-    #define VMA_NULL nullptr
-#endif
-
-#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
-#include <cstdlib>
-static void* vma_aligned_alloc(size_t alignment, size_t size)
-{
-    // alignment must be >= sizeof(void*)
-    if(alignment < sizeof(void*))
-    {
-        alignment = sizeof(void*);
-    }
-
-    return memalign(alignment, size);
-}
-#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
-#include <cstdlib>
-
-#if defined(__APPLE__)
-#include <AvailabilityMacros.h>
-#endif
-
-static void* vma_aligned_alloc(size_t alignment, size_t size)
-{
-    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
-    // Therefore, for now disable this specific exception until a proper solution is found.
-    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
-    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
-    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
-    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
-    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
-    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
-    //    // the __builtin_available checks for 10.15.
-    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
-    //    if (__builtin_available(macOS 10.15, iOS 13, *))
-    //        return aligned_alloc(alignment, size);
-    //#endif
-    //#endif
-
-    // alignment must be >= sizeof(void*)
-    if(alignment < sizeof(void*))
-    {
-        alignment = sizeof(void*);
-    }
-
-    void *pointer;
-    if(posix_memalign(&pointer, alignment, size) == 0)
-        return pointer;
-    return VMA_NULL;
-}
-#elif defined(_WIN32)
-static void* vma_aligned_alloc(size_t alignment, size_t size)
-{
-    return _aligned_malloc(size, alignment);
-}
-#else
-static void* vma_aligned_alloc(size_t alignment, size_t size)
-{
-    return aligned_alloc(alignment, size);
-}
-#endif
-
-#if defined(_WIN32)
-static void vma_aligned_free(void* ptr)
-{
-    _aligned_free(ptr);
-}
-#else
-static void vma_aligned_free(void* VMA_NULLABLE ptr)
-{
-    free(ptr);
-}
-#endif
-
-// If your compiler is not compatible with C++11 and definition of
-// aligned_alloc() function is missing, uncommenting following line may help:
-
-//#include <malloc.h>
-
-// Normal assert to check for programmer's errors, especially in Debug configuration.
-#ifndef VMA_ASSERT
-    #ifdef NDEBUG
-        #define VMA_ASSERT(expr)
-    #else
-        #define VMA_ASSERT(expr) assert(expr)
-    #endif
-#endif
-
-// Assert that will be called very often, like inside data structures e.g. operator[].
-// Making it non-empty can make program slow.
-#ifndef VMA_HEAVY_ASSERT
-    #ifdef NDEBUG
-        #define VMA_HEAVY_ASSERT(expr)
-    #else
-        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
-    #endif
-#endif
-
-#ifndef VMA_ALIGN_OF
-    #define VMA_ALIGN_OF(type) (__alignof(type))
-#endif
-
-#ifndef VMA_SYSTEM_ALIGNED_MALLOC
-    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
-#endif
-
-#ifndef VMA_SYSTEM_ALIGNED_FREE
-    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
-    #if defined(VMA_SYSTEM_FREE)
-        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
-    #else
-        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
-    #endif
-#endif
-
-#ifndef VMA_COUNT_BITS_SET
-    // Returns number of bits set to 1 in (v)
-    #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
-#endif
-
-#ifndef VMA_BITSCAN_LSB
-    // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
-    #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
-#endif
-
-#ifndef VMA_BITSCAN_MSB
-    // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
-    #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
-#endif
-
-#ifndef VMA_MIN
-    #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
-#endif
-
-#ifndef VMA_MAX
-    #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
-#endif
-
-#ifndef VMA_SWAP
-    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
-#endif
-
-#ifndef VMA_SORT
-    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
-#endif
-
-#ifndef VMA_DEBUG_LOG
-    #define VMA_DEBUG_LOG(format, ...)
-    /*
-    #define VMA_DEBUG_LOG(format, ...) do { \
-        printf(format, __VA_ARGS__); \
-        printf("\n"); \
-    } while(false)
-    */
-#endif
-
-// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
-#if VMA_STATS_STRING_ENABLED
-    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
-    {
-        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
-    }
-    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
-    {
-        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
-    }
-    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
-    {
-        snprintf(outStr, strLen, "%p", ptr);
-    }
-#endif
-
-#ifndef VMA_MUTEX
-    class VmaMutex
-    {
-    public:
-        void Lock() { m_Mutex.lock(); }
-        void Unlock() { m_Mutex.unlock(); }
-        bool TryLock() { return m_Mutex.try_lock(); }
-    private:
-        std::mutex m_Mutex;
-    };
-    #define VMA_MUTEX VmaMutex
-#endif
-
-// Read-write mutex, where "read" is shared access, "write" is exclusive access.
-#ifndef VMA_RW_MUTEX
-    #if VMA_USE_STL_SHARED_MUTEX
-        // Use std::shared_mutex from C++17.
-        #include <shared_mutex>
-        class VmaRWMutex
-        {
-        public:
-            void LockRead() { m_Mutex.lock_shared(); }
-            void UnlockRead() { m_Mutex.unlock_shared(); }
-            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
-            void LockWrite() { m_Mutex.lock(); }
-            void UnlockWrite() { m_Mutex.unlock(); }
-            bool TryLockWrite() { return m_Mutex.try_lock(); }
-        private:
-            std::shared_mutex m_Mutex;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
-        // Use SRWLOCK from WinAPI.
-        // Minimum supported client = Windows Vista, server = Windows Server 2008.
-        class VmaRWMutex
-        {
-        public:
-            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
-            void LockRead() { AcquireSRWLockShared(&m_Lock); }
-            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
-            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
-            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
-        private:
-            SRWLOCK m_Lock;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #else
-        // Less efficient fallback: Use normal mutex.
-        class VmaRWMutex
-        {
-        public:
-            void LockRead() { m_Mutex.Lock(); }
-            void UnlockRead() { m_Mutex.Unlock(); }
-            bool TryLockRead() { return m_Mutex.TryLock(); }
-            void LockWrite() { m_Mutex.Lock(); }
-            void UnlockWrite() { m_Mutex.Unlock(); }
-            bool TryLockWrite() { return m_Mutex.TryLock(); }
-        private:
-            VMA_MUTEX m_Mutex;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #endif // #if VMA_USE_STL_SHARED_MUTEX
-#endif // #ifndef VMA_RW_MUTEX
-
-/*
-If providing your own implementation, you need to implement a subset of std::atomic.
-*/
-#ifndef VMA_ATOMIC_UINT32
-    #include <atomic>
-    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
-#endif
-
-#ifndef VMA_ATOMIC_UINT64
-    #include <atomic>
-    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
-#endif
-
-#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
-    /**
-    Every allocation will have its own memory block.
-    Define to 1 for debugging purposes only.
-    */
-    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
-#endif
-
-#ifndef VMA_MIN_ALIGNMENT
-    /**
-    Minimum alignment of all allocations, in bytes.
-    Set to more than 1 for debugging purposes. Must be power of two.
-    */
-    #ifdef VMA_DEBUG_ALIGNMENT // Old name
-        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
-    #else
-        #define VMA_MIN_ALIGNMENT (1)
-    #endif
-#endif
-
-#ifndef VMA_DEBUG_MARGIN
-    /**
-    Minimum margin after every allocation, in bytes.
-    Set nonzero for debugging purposes only.
-    */
-    #define VMA_DEBUG_MARGIN (0)
-#endif
-
-#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
-    /**
-    Define this macro to 1 to automatically fill new allocations and destroyed
-    allocations with some bit pattern.
-    */
-    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
-#endif
-
-#ifndef VMA_DEBUG_DETECT_CORRUPTION
-    /**
-    Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
-    enable writing magic value to the margin after every allocation and
-    validating it, so that memory corruptions (out-of-bounds writes) are detected.
-    */
-    #define VMA_DEBUG_DETECT_CORRUPTION (0)
-#endif
-
-#ifndef VMA_DEBUG_GLOBAL_MUTEX
-    /**
-    Set this to 1 for debugging purposes only, to enable single mutex protecting all
-    entry calls to the library. Can be useful for debugging multithreading issues.
-    */
-    #define VMA_DEBUG_GLOBAL_MUTEX (0)
-#endif
-
-#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
-    /**
-    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
-    Set to more than 1 for debugging purposes only. Must be power of two.
-    */
-    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
-#endif
-
-#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
-    /*
-    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
-    and return error instead of leaving up to Vulkan implementation what to do in such cases.
-    */
-    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
-#endif
-
-#ifndef VMA_SMALL_HEAP_MAX_SIZE
-    /// Maximum size of a memory heap in Vulkan to consider it "small".
-    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
-#endif
-
-#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
-    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
-    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
-#endif
-
-/*
-Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
-or a persistently mapped allocation is created and destroyed several times in a row.
-It keeps additional +1 mapping of a device memory block to prevent calling actual
-vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
-tools like RenderDoc.
-*/
-#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
-    #define VMA_MAPPING_HYSTERESIS_ENABLED 1
-#endif
-
-#ifndef VMA_CLASS_NO_COPY
-    #define VMA_CLASS_NO_COPY(className) \
-        private: \
-            className(const className&) = delete; \
-            className& operator=(const className&) = delete;
-#endif
-
-#define VMA_VALIDATE(cond) do { if(!(cond)) { \
-        VMA_ASSERT(0 && "Validation failed: " #cond); \
-        return false; \
-    } } while(false)
-
-/*******************************************************************************
-END OF CONFIGURATION
-*/
-#endif // _VMA_CONFIGURATION
-
-
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
-// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
-
-// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
-static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
-static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
-static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
-static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
-static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-static const uint32_t VMA_VENDOR_ID_AMD = 4098;
-
-// This one is tricky. Vulkan specification defines this code as available since
-// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
-// See pull request #207.
-#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
-
-
-#if VMA_STATS_STRING_ENABLED
-// Correspond to values of enum VmaSuballocationType.
-static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
-{
-    "FREE",
-    "UNKNOWN",
-    "BUFFER",
-    "IMAGE_UNKNOWN",
-    "IMAGE_LINEAR",
-    "IMAGE_OPTIMAL",
-};
-#endif
-
-static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
-    { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
-
-
-#ifndef _VMA_ENUM_DECLARATIONS
-
-enum VmaSuballocationType
-{
-    VMA_SUBALLOCATION_TYPE_FREE = 0,
-    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
-    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
-    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
-    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
-    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
-    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
-};
-
-enum VMA_CACHE_OPERATION
-{
-    VMA_CACHE_FLUSH,
-    VMA_CACHE_INVALIDATE
-};
-
-enum class VmaAllocationRequestType
-{
-    Normal,
-    TLSF,
-    // Used by "Linear" algorithm.
-    UpperAddress,
-    EndOf1st,
-    EndOf2nd,
-};
-
-#endif // _VMA_ENUM_DECLARATIONS
-
-#ifndef _VMA_FORWARD_DECLARATIONS
-// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
-
-struct VmaMutexLock;
-struct VmaMutexLockRead;
-struct VmaMutexLockWrite;
-
-template<typename T>
-struct AtomicTransactionalIncrement;
-
-template<typename T>
-struct VmaStlAllocator;
-
-template<typename T, typename AllocatorT>
-class VmaVector;
-
-template<typename T, typename AllocatorT, size_t N>
-class VmaSmallVector;
-
-template<typename T>
-class VmaPoolAllocator;
-
-template<typename T>
-struct VmaListItem;
-
-template<typename T>
-class VmaRawList;
-
-template<typename T, typename AllocatorT>
-class VmaList;
-
-template<typename ItemTypeTraits>
-class VmaIntrusiveLinkedList;
-
-// Unused in this version
-#if 0
-template<typename T1, typename T2>
-struct VmaPair;
-template<typename FirstT, typename SecondT>
-struct VmaPairFirstLess;
-
-template<typename KeyT, typename ValueT>
-class VmaMap;
-#endif
-
-#if VMA_STATS_STRING_ENABLED
-class VmaStringBuilder;
-class VmaJsonWriter;
-#endif
-
-class VmaDeviceMemoryBlock;
-
-struct VmaDedicatedAllocationListItemTraits;
-class VmaDedicatedAllocationList;
-
-struct VmaSuballocation;
-struct VmaSuballocationOffsetLess;
-struct VmaSuballocationOffsetGreater;
-struct VmaSuballocationItemSizeLess;
-
-typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
-
-struct VmaAllocationRequest;
-
-class VmaBlockMetadata;
-class VmaBlockMetadata_Linear;
-class VmaBlockMetadata_TLSF;
-
-class VmaBlockVector;
-
-struct VmaPoolListItemTraits;
-
-struct VmaCurrentBudgetData;
-
-class VmaAllocationObjectAllocator;
-
-#endif // _VMA_FORWARD_DECLARATIONS
-
-
-#ifndef _VMA_FUNCTIONS
-
-/*
-Returns number of bits set to 1 in (v).
-
-On specific platforms and compilers you can use intrinsics like:
-
-Visual Studio:
-    return __popcnt(v);
-GCC, Clang:
-    return static_cast<uint32_t>(__builtin_popcount(v));
-
-Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
-But you need to check in runtime whether user's CPU supports these, as some old processors don't.
-*/
-static inline uint32_t VmaCountBitsSet(uint32_t v)
-{
-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-    return std::popcount(v);
-#else
-    uint32_t c = v - ((v >> 1) & 0x55555555);
-    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
-    c = ((c >> 4) + c) & 0x0F0F0F0F;
-    c = ((c >> 8) + c) & 0x00FF00FF;
-    c = ((c >> 16) + c) & 0x0000FFFF;
-    return c;
-#endif
-}
-
-static inline uint8_t VmaBitScanLSB(uint64_t mask)
-{
-#if defined(_MSC_VER) && defined(_WIN64)
-    unsigned long pos;
-    if (_BitScanForward64(&pos, mask))
-        return static_cast<uint8_t>(pos);
-    return UINT8_MAX;
-#elif defined __GNUC__ || defined __clang__
-    return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
-#else
-    uint8_t pos = 0;
-    uint64_t bit = 1;
-    do
-    {
-        if (mask & bit)
-            return pos;
-        bit <<= 1;
-    } while (pos++ < 63);
-    return UINT8_MAX;
-#endif
-}
-
-static inline uint8_t VmaBitScanLSB(uint32_t mask)
-{
-#ifdef _MSC_VER
-    unsigned long pos;
-    if (_BitScanForward(&pos, mask))
-        return static_cast<uint8_t>(pos);
-    return UINT8_MAX;
-#elif defined __GNUC__ || defined __clang__
-    return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
-#else
-    uint8_t pos = 0;
-    uint32_t bit = 1;
-    do
-    {
-        if (mask & bit)
-            return pos;
-        bit <<= 1;
-    } while (pos++ < 31);
-    return UINT8_MAX;
-#endif
-}
-
-static inline uint8_t VmaBitScanMSB(uint64_t mask)
-{
-#if defined(_MSC_VER) && defined(_WIN64)
-    unsigned long pos;
-    if (_BitScanReverse64(&pos, mask))
-        return static_cast<uint8_t>(pos);
-#elif defined __GNUC__ || defined __clang__
-    if (mask)
-        return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
-#else
-    uint8_t pos = 63;
-    uint64_t bit = 1ULL << 63;
-    do
-    {
-        if (mask & bit)
-            return pos;
-        bit >>= 1;
-    } while (pos-- > 0);
-#endif
-    return UINT8_MAX;
-}
-
-static inline uint8_t VmaBitScanMSB(uint32_t mask)
-{
-#ifdef _MSC_VER
-    unsigned long pos;
-    if (_BitScanReverse(&pos, mask))
-        return static_cast<uint8_t>(pos);
-#elif defined __GNUC__ || defined __clang__
-static inline uint8_t VmaBitScanMSB(uint32_t mask)
-{
-#ifdef _MSC_VER
-    unsigned long pos;
-    if (_BitScanReverse(&pos, mask))
-        return static_cast<uint8_t>(pos);
-#elif defined __GNUC__ || defined __clang__
-    if (mask)
-        return 31 - static_cast<uint8_t>(__builtin_clz(mask));
-#else
-    uint8_t pos = 31;
-    uint32_t bit = 1UL << 31;
-    do
-    {
-        if (mask & bit)
-            return pos;
-        bit >>= 1;
-    } while (pos-- > 0);
-#endif
-    return UINT8_MAX;
-}
-
-/*
-Returns true if given number is a power of two.
-T must be unsigned integer number or signed integer but always nonnegative.
-For 0 returns true.
-*/
-template <typename T>
-inline bool VmaIsPow2(T x)
-{
-    return (x & (x - 1)) == 0;
-}
-
-// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
-// Use types like uint32_t, uint64_t as T.
-template <typename T>
-static inline T VmaAlignUp(T val, T alignment)
-{
-    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-    return (val + alignment - 1) & ~(alignment - 1);
-}
-
-// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
-// Use types like uint32_t, uint64_t as T.
-template <typename T>
-static inline T VmaAlignDown(T val, T alignment)
-{
-    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-    return val & ~(alignment - 1);
-}
-
-// Division with mathematical rounding to nearest number.
-template <typename T>
-static inline T VmaRoundDiv(T x, T y)
-{
-    return (x + (y / (T)2)) / y;
-}
-
-// Divide by 'y' and round up to nearest integer.
-template <typename T>
-static inline T VmaDivideRoundingUp(T x, T y)
-{
-    return (x + y - (T)1) / y;
-}
-
-// Returns smallest power of 2 greater or equal to v.
-static inline uint32_t VmaNextPow2(uint32_t v)
-{
-    v--;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    v++;
-    return v;
-}
-
-static inline uint64_t VmaNextPow2(uint64_t v)
-{
-    v--;
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    v |= v >> 32;
-    v++;
-    return v;
-}
-
-// Returns largest power of 2 less or equal to v.
-static inline uint32_t VmaPrevPow2(uint32_t v)
-{
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    v = v ^ (v >> 1);
-    return v;
-}
-
-static inline uint64_t VmaPrevPow2(uint64_t v)
-{
-    v |= v >> 1;
-    v |= v >> 2;
-    v |= v >> 4;
-    v |= v >> 8;
-    v |= v >> 16;
-    v |= v >> 32;
-    v = v ^ (v >> 1);
-    return v;
-}
-
-static inline bool VmaStrIsEmpty(const char* pStr)
-{
-    return pStr == VMA_NULL || *pStr == '\0';
-}
-
-/*
-Returns true if two memory blocks occupy overlapping pages.
-ResourceA must be in less memory offset than ResourceB.
-
-Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
-chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
-*/
-static inline bool VmaBlocksOnSamePage(
-    VkDeviceSize resourceAOffset,
-    VkDeviceSize resourceASize,
-    VkDeviceSize resourceBOffset,
-    VkDeviceSize pageSize)
-{
-    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
-    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
-    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
-    VkDeviceSize resourceBStart = resourceBOffset;
-    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
-    return resourceAEndPage == resourceBStartPage;
-}
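[Editor's note: a worked example makes the masking tricks in VmaAlignUp/VmaAlignDown concrete. This is a standalone sketch using the same expressions; as the VMA_HEAVY_ASSERT above enforces, the alignment must be a power of two for the bit masks to be valid:

#include <cassert>
#include <cstdint>

static std::uint64_t AlignUp(std::uint64_t val, std::uint64_t alignment)
{
    return (val + alignment - 1) & ~(alignment - 1); // same expression as VmaAlignUp
}

static std::uint64_t AlignDown(std::uint64_t val, std::uint64_t alignment)
{
    return val & ~(alignment - 1); // same expression as VmaAlignDown
}

int main()
{
    assert(AlignUp(11, 8) == 16);  // rounds up to the next multiple of 8
    assert(AlignUp(16, 8) == 16);  // already-aligned values are unchanged
    assert(AlignDown(11, 8) == 8); // rounds down to the previous multiple of 8
    return 0;
}
]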
-/*
-Returns true if given suballocation types could conflict and must respect
-VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
-or linear image and another one is optimal image. If type is unknown, behave
-conservatively.
-*/
-static inline bool VmaIsBufferImageGranularityConflict(
-    VmaSuballocationType suballocType1,
-    VmaSuballocationType suballocType2)
-{
-    if (suballocType1 > suballocType2)
-    {
-        VMA_SWAP(suballocType1, suballocType2);
-    }
-
-    switch (suballocType1)
-    {
-    case VMA_SUBALLOCATION_TYPE_FREE:
-        return false;
-    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
-        return true;
-    case VMA_SUBALLOCATION_TYPE_BUFFER:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
-        return false;
-    default:
-        VMA_ASSERT(0);
-        return true;
-    }
-}
-
-static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
-{
-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
-    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for (size_t i = 0; i < numberCount; ++i, ++pDst)
-    {
-        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
-    }
-#else
-    // no-op
-#endif
-}
-
-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
-{
-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
-    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-    for (size_t i = 0; i < numberCount; ++i, ++pSrc)
-    {
-        if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
-        {
-            return false;
-        }
-    }
-#endif
-    return true;
-}
-
-/*
-Fills structure with parameters of an example buffer to be used for transfers
-during GPU memory defragmentation.
-*/
-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
-{
-    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
-    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
-}
-
-
-/*
-Performs binary search and returns iterator to first element that is greater or
-equal to (key), according to comparison (cmp).
-
-Cmp should return true if first argument is less than second argument.
-
-Returned value is the found element, if present in the collection, or the place
-where a new element with value (key) should be inserted.
-*/
-template <typename CmpLess, typename IterT, typename KeyT>
-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
-{
-    size_t down = 0, up = (end - beg);
-    while (down < up)
-    {
-        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
-        if (cmp(*(beg + mid), key))
-        {
-            down = mid + 1;
-        }
-        else
-        {
-            up = mid;
-        }
-    }
-    return beg + down;
-}
-
-template<typename CmpLess, typename IterT, typename KeyT>
-IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
-{
-    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
-        beg, end, value, cmp);
-    if (it == end ||
-        (!cmp(*it, value) && !cmp(value, *it)))
-    {
-        return it;
-    }
-    return end;
-}
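[Editor's note: VmaBinaryFindFirstNotLess is a hand-rolled equivalent of std::lower_bound. A standalone sketch of the contract on a plain sorted array, using the standard library rather than the VMA internals:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    const std::vector<int> sorted{1, 3, 3, 7};
    const auto less = [](int a, int b) { return a < b; };
    // First element not less than 3 is at index 1 - exactly what
    // VmaBinaryFindFirstNotLess(beg, end, 3, less) would return.
    assert(std::lower_bound(sorted.begin(), sorted.end(), 3, less) - sorted.begin() == 1);
    // For a missing key, the insertion position is returned instead.
    assert(std::lower_bound(sorted.begin(), sorted.end(), 5, less) - sorted.begin() == 3);
    return 0;
}
]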
-/*
-Returns true if all pointers in the array are not-null and unique.
-Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
-T must be pointer type, e.g. VmaAllocation, VmaPool.
-*/
-template<typename T>
-static bool VmaValidatePointerArray(uint32_t count, const T* arr)
-{
-    for (uint32_t i = 0; i < count; ++i)
-    {
-        const T iPtr = arr[i];
-        if (iPtr == VMA_NULL)
-        {
-            return false;
-        }
-        for (uint32_t j = i + 1; j < count; ++j)
-        {
-            if (iPtr == arr[j])
-            {
-                return false;
-            }
-        }
-    }
-    return true;
-}
-
-template<typename MainT, typename NewT>
-static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
-{
-    newStruct->pNext = mainStruct->pNext;
-    mainStruct->pNext = newStruct;
-}
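[Editor's note: VmaPnextChainPushFront simply splices a structure onto the front of a Vulkan pNext chain. Sketched below with two stand-in structs (Main and Ext are hypothetical; they resemble real Vulkan structs only in carrying a pNext member):

#include <cassert>

struct Main { const void* pNext = nullptr; };
struct Ext  { const void* pNext = nullptr; int payload = 0; };

template<typename MainT, typename NewT>
static void PnextChainPushFront(MainT* mainStruct, NewT* newStruct) // same body as above
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

int main()
{
    Main info;
    Ext extA, extB;
    PnextChainPushFront(&info, &extA); // chain: info -> extA
    PnextChainPushFront(&info, &extB); // chain: info -> extB -> extA
    assert(info.pNext == &extB && extB.pNext == &extA && extA.pNext == nullptr);
    return 0;
}
]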
-// This is the main algorithm that guides the selection of a memory type best for an allocation -
-// converts usage to required/preferred/not preferred flags.
-static bool FindMemoryPreferences(
-    bool isIntegratedGPU,
-    const VmaAllocationCreateInfo& allocCreateInfo,
-    VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
-    VkMemoryPropertyFlags& outRequiredFlags,
-    VkMemoryPropertyFlags& outPreferredFlags,
-    VkMemoryPropertyFlags& outNotPreferredFlags)
-{
-    outRequiredFlags = allocCreateInfo.requiredFlags;
-    outPreferredFlags = allocCreateInfo.preferredFlags;
-    outNotPreferredFlags = 0;
-
-    switch(allocCreateInfo.usage)
-    {
-    case VMA_MEMORY_USAGE_UNKNOWN:
-        break;
-    case VMA_MEMORY_USAGE_GPU_ONLY:
-        if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-        {
-            outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-        }
-        break;
-    case VMA_MEMORY_USAGE_CPU_ONLY:
-        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-        break;
-    case VMA_MEMORY_USAGE_CPU_TO_GPU:
-        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-        if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-        {
-            outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-        }
-        break;
-    case VMA_MEMORY_USAGE_GPU_TO_CPU:
-        outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-        outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-        break;
-    case VMA_MEMORY_USAGE_CPU_COPY:
-        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-        break;
-    case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
-        outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
-        break;
-    case VMA_MEMORY_USAGE_AUTO:
-    case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
-    case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
-    {
-        if(bufImgUsage == UINT32_MAX)
-        {
-            VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
-            return false;
-        }
-        // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
-        const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
-        const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
-        const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
-        const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
-        const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
-        const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
-
-        // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
-        if(hostAccessRandom)
-        {
-            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-            {
-                // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
-                // Omitting HOST_VISIBLE here is intentional.
-                // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
-                // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
-                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-            }
-            else
-            {
-                // Always CPU memory, cached.
-                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-            }
-        }
-        // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
-        else if(hostAccessSequentialWrite)
-        {
-            // Want uncached and write-combined.
-            outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-
-            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-            {
-                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-            }
-            else
-            {
-                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-                // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
-                if(deviceAccess)
-                {
-                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
-                    if(preferHost)
-                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-                    else
-                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-                }
-                // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
-                else
-                {
-                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
-                    if(preferDevice)
-                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-                    else
-                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-                }
-            }
-        }
-        // No CPU access
-        else
-        {
-            // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
-            if(deviceAccess)
-            {
-                // ...unless there is a clear preference from the user not to do so.
-                if(preferHost)
-                    outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-                else
-                    outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-            }
-            // No direct GPU access, no CPU access, just transfers.
-            // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
-            // a "swap file" copy to free some GPU memory (then better CPU memory).
-            // Up to the user to decide. If no preference, assume the former and choose GPU memory.
-            if(preferHost)
-                outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-            else
-                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-        }
-        break;
-    }
-    default:
-        VMA_ASSERT(0);
-    }
-
-    // Avoid DEVICE_COHERENT unless explicitly requested.
-    if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
-        (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
-    {
-        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
-    }
-
-    return true;
-}
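[Editor's note: from the public API side, this decision tree is driven entirely by VmaAllocationCreateInfo. A sketch of the case traced above (assumes vk_mem_alloc.h is included and a valid VmaAllocator was created elsewhere):

// A host-written staging buffer: VMA_MEMORY_USAGE_AUTO plus the
// HOST_ACCESS_SEQUENTIAL_WRITE flag makes FindMemoryPreferences require
// HOST_VISIBLE and mark HOST_CACHED as not preferred.
VkResult CreateStagingBuffer(VmaAllocator allocator, VkBuffer* outBuffer, VmaAllocation* outAllocation)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 64 * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, outBuffer, outAllocation, nullptr);
}
]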
-////////////////////////////////////////////////////////////////////////////////
-// Memory allocation
-
-static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
-{
-    void* result = VMA_NULL;
-    if ((pAllocationCallbacks != VMA_NULL) &&
-        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
-    {
-        result = (*pAllocationCallbacks->pfnAllocation)(
-            pAllocationCallbacks->pUserData,
-            size,
-            alignment,
-            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-    }
-    else
-    {
-        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
-    }
-    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
-    return result;
-}
-
-static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
-{
-    if ((pAllocationCallbacks != VMA_NULL) &&
-        (pAllocationCallbacks->pfnFree != VMA_NULL))
-    {
-        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
-    }
-    else
-    {
-        VMA_SYSTEM_ALIGNED_FREE(ptr);
-    }
-}
-
-template<typename T>
-static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
-{
-    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
-}
-
-template<typename T>
-static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
-{
-    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
-}
-
-#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)
-
-#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
-
-template<typename T>
-static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
-{
-    ptr->~T();
-    VmaFree(pAllocationCallbacks, ptr);
-}
-
-template<typename T>
-static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
-{
-    if (ptr != VMA_NULL)
-    {
-        for (size_t i = count; i--; )
-        {
-            ptr[i].~T();
-        }
-        VmaFree(pAllocationCallbacks, ptr);
-    }
-}
-
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
-{
-    if (srcStr != VMA_NULL)
-    {
-        const size_t len = strlen(srcStr);
-        char* const result = vma_new_array(allocs, char, len + 1);
-        memcpy(result, srcStr, len + 1);
-        return result;
-    }
-    return VMA_NULL;
-}
-
-#if VMA_STATS_STRING_ENABLED
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
-{
-    if (srcStr != VMA_NULL)
-    {
-        char* const result = vma_new_array(allocs, char, strLen + 1);
-        memcpy(result, srcStr, strLen);
-        result[strLen] = '\0';
-        return result;
-    }
-    return VMA_NULL;
-}
-#endif // VMA_STATS_STRING_ENABLED
-
-static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
-{
-    if (str != VMA_NULL)
-    {
-        const size_t len = strlen(str);
-        vma_delete_array(allocs, str, len + 1);
-    }
-}
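[Editor's note: VmaMalloc/VmaFree fall back to VMA_SYSTEM_ALIGNED_MALLOC/FREE when no callbacks are given. Wiring in custom VkAllocationCallbacks looks like this sketch (MyAlloc/MyRealloc/MyFree are illustrative; a real implementation must honor the requested alignment, whereas std::malloc only guarantees alignof(std::max_align_t), assumed sufficient here):

#include <vulkan/vulkan.h>
#include <cstdlib>

static void* VKAPI_PTR MyAlloc(void*, size_t size, size_t /*alignment*/, VkSystemAllocationScope)
{
    return std::malloc(size);
}
static void* VKAPI_PTR MyRealloc(void*, void* pOriginal, size_t size, size_t /*alignment*/, VkSystemAllocationScope)
{
    return std::realloc(pOriginal, size);
}
static void VKAPI_PTR MyFree(void*, void* pMemory)
{
    std::free(pMemory);
}

int main()
{
    VkAllocationCallbacks callbacks = {};
    callbacks.pfnAllocation   = MyAlloc;
    callbacks.pfnReallocation = MyRealloc;
    callbacks.pfnFree         = MyFree;
    // Passing &callbacks via VmaAllocatorCreateInfo::pAllocationCallbacks
    // routes VmaMalloc/VmaFree above through MyAlloc/MyFree.
    return 0;
}
]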
-template<typename CmpLess, typename VectorT>
-size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
-{
-    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-        vector.data(),
-        vector.data() + vector.size(),
-        value,
-        CmpLess()) - vector.data();
-    VmaVectorInsert(vector, indexToInsert, value);
-    return indexToInsert;
-}
-
-template<typename CmpLess, typename VectorT>
-bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
-{
-    CmpLess comparator;
-    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
-        vector.begin(),
-        vector.end(),
-        value,
-        comparator);
-    if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
-    {
-        size_t indexToRemove = it - vector.begin();
-        VmaVectorRemove(vector, indexToRemove);
-        return true;
-    }
-    return false;
-}
-#endif // _VMA_FUNCTIONS
-
-#ifndef _VMA_STATISTICS_FUNCTIONS
-
-static void VmaClearStatistics(VmaStatistics& outStats)
-{
-    outStats.blockCount = 0;
-    outStats.allocationCount = 0;
-    outStats.blockBytes = 0;
-    outStats.allocationBytes = 0;
-}
-
-static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
-{
-    inoutStats.blockCount += src.blockCount;
-    inoutStats.allocationCount += src.allocationCount;
-    inoutStats.blockBytes += src.blockBytes;
-    inoutStats.allocationBytes += src.allocationBytes;
-}
-
-static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
-{
-    VmaClearStatistics(outStats.statistics);
-    outStats.unusedRangeCount = 0;
-    outStats.allocationSizeMin = VK_WHOLE_SIZE;
-    outStats.allocationSizeMax = 0;
-    outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
-    outStats.unusedRangeSizeMax = 0;
-}
-
-static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
-{
-    inoutStats.statistics.allocationCount++;
-    inoutStats.statistics.allocationBytes += size;
-    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
-    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
-}
-
-static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
-{
-    inoutStats.unusedRangeCount++;
-    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
-    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
-}
-
-static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
-{
-    VmaAddStatistics(inoutStats.statistics, src.statistics);
-    inoutStats.unusedRangeCount += src.unusedRangeCount;
-    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
-    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
-    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
-    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
-}
-
-#endif // _VMA_STATISTICS_FUNCTIONS
-
-#ifndef _VMA_MUTEX_LOCK
-// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
-struct VmaMutexLock
-{
-    VMA_CLASS_NO_COPY(VmaMutexLock)
-public:
-    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
-        m_pMutex(useMutex ? &mutex : VMA_NULL)
-    {
-        if (m_pMutex) { m_pMutex->Lock(); }
-    }
-    ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
-
-private:
-    VMA_MUTEX* m_pMutex;
-};
-
-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
-struct VmaMutexLockRead
-{
-    VMA_CLASS_NO_COPY(VmaMutexLockRead)
-public:
-    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
-        m_pMutex(useMutex ? &mutex : VMA_NULL)
-    {
-        if (m_pMutex) { m_pMutex->LockRead(); }
-    }
-    ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
-
-private:
-    VMA_RW_MUTEX* m_pMutex;
-};
-
-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
-struct VmaMutexLockWrite -{ - VMA_CLASS_NO_COPY(VmaMutexLockWrite) -public: - VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) - : m_pMutex(useMutex ? &mutex : VMA_NULL) - { - if (m_pMutex) { m_pMutex->LockWrite(); } - } - ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } - -private: - VMA_RW_MUTEX* m_pMutex; -}; - -#if VMA_DEBUG_GLOBAL_MUTEX - static VMA_MUTEX gDebugGlobalMutex; - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); -#else - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK -#endif -#endif // _VMA_MUTEX_LOCK - -#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT -// An object that increments given atomic but decrements it back in the destructor unless Commit() is called. -template -struct AtomicTransactionalIncrement -{ -public: - typedef std::atomic AtomicT; - - ~AtomicTransactionalIncrement() - { - if(m_Atomic) - --(*m_Atomic); - } - - void Commit() { m_Atomic = nullptr; } - T Increment(AtomicT* atomic) - { - m_Atomic = atomic; - return m_Atomic->fetch_add(1); - } - -private: - AtomicT* m_Atomic = nullptr; -}; -#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT - -#ifndef _VMA_STL_ALLOCATOR -// STL-compatible allocator. -template -struct VmaStlAllocator -{ - const VkAllocationCallbacks* const m_pCallbacks; - typedef T value_type; - - VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} - template - VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} - VmaStlAllocator(const VmaStlAllocator&) = default; - VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; - - T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } - void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } - - template - bool operator==(const VmaStlAllocator& rhs) const - { - return m_pCallbacks == rhs.m_pCallbacks; - } - template - bool operator!=(const VmaStlAllocator& rhs) const - { - return m_pCallbacks != rhs.m_pCallbacks; - } -}; -#endif // _VMA_STL_ALLOCATOR - -#ifndef _VMA_VECTOR -/* Class with interface compatible with subset of std::vector. -T must be POD because constructors and destructors are not called and memcpy is -used for these objects. */ -template -class VmaVector -{ -public: - typedef T value_type; - typedef T* iterator; - typedef const T* const_iterator; - - VmaVector(const AllocatorT& allocator); - VmaVector(size_t count, const AllocatorT& allocator); - // This version of the constructor is here for compatibility with pre-C++14 std::vector. - // value is unused. 
- VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} - VmaVector(const VmaVector& src); - VmaVector& operator=(const VmaVector& rhs); - ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } - - bool empty() const { return m_Count == 0; } - size_t size() const { return m_Count; } - T* data() { return m_pArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - const T* data() const { return m_pArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - - iterator begin() { return m_pArray; } - iterator end() { return m_pArray + m_Count; } - const_iterator cbegin() const { return m_pArray; } - const_iterator cend() const { return m_pArray + m_Count; } - const_iterator begin() const { return cbegin(); } - const_iterator end() const { return cend(); } - - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } - - void push_back(const T& src); - void reserve(size_t newCapacity, bool freeMemory = false); - void resize(size_t newCount); - void clear() { resize(0); } - void shrink_to_fit(); - void insert(size_t index, const T& src); - void remove(size_t index); - - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - -private: - AllocatorT m_Allocator; - T* m_pArray; - size_t m_Count; - size_t m_Capacity; -}; - -#ifndef _VMA_VECTOR_FUNCTIONS -template -VmaVector::VmaVector(const AllocatorT& allocator) - : m_Allocator(allocator), - m_pArray(VMA_NULL), - m_Count(0), - m_Capacity(0) {} - -template -VmaVector::VmaVector(size_t count, const AllocatorT& allocator) - : m_Allocator(allocator), - m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), - m_Count(count), - m_Capacity(count) {} - -template -VmaVector::VmaVector(const VmaVector& src) - : m_Allocator(src.m_Allocator), - m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), - m_Count(src.m_Count), - m_Capacity(src.m_Count) -{ - if (m_Count != 0) - { - memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); - } -} - -template -VmaVector& VmaVector::operator=(const VmaVector& rhs) -{ - if (&rhs != this) - { - resize(rhs.m_Count); - if (m_Count != 0) - { - memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); - } - } - return *this; -} - -template -void VmaVector::push_back(const T& src) -{ - const size_t newIndex = size(); - resize(newIndex + 1); - m_pArray[newIndex] = src; -} - -template -void VmaVector::reserve(size_t newCapacity, bool freeMemory) -{ - newCapacity = VMA_MAX(newCapacity, m_Count); - - if ((newCapacity < m_Capacity) && !freeMemory) - { - newCapacity = m_Capacity; - } - - if (newCapacity != m_Capacity) - { - T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; - if (m_Count != 0) - { - memcpy(newArray, m_pArray, m_Count * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = newCapacity; - m_pArray = newArray; - } -} - -template -void VmaVector::resize(size_t newCount) -{ - size_t newCapacity = m_Capacity; - if (newCount > m_Capacity) - { - newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); - } - - if (newCapacity != m_Capacity) - { - T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; - const size_t elementsToCopy = VMA_MIN(m_Count, newCount); - if (elementsToCopy != 0) - { - memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = newCapacity; - m_pArray = newArray; - } - - m_Count = newCount; -} - -template -void VmaVector::shrink_to_fit() -{ - if (m_Capacity > m_Count) - { - T* newArray = VMA_NULL; - if (m_Count > 0) - { - newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); - memcpy(newArray, m_pArray, m_Count * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = m_Count; - m_pArray = newArray; - } -} - -template -void VmaVector::insert(size_t index, const T& src) -{ - VMA_HEAVY_ASSERT(index <= m_Count); - const size_t oldCount = size(); - resize(oldCount + 1); - if (index < oldCount) - { - memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); - } - m_pArray[index] = src; -} - -template -void VmaVector::remove(size_t index) -{ - VMA_HEAVY_ASSERT(index < m_Count); - const size_t oldCount = size(); - if (index < oldCount - 1) - { - memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); - } - resize(oldCount - 1); -} -#endif // _VMA_VECTOR_FUNCTIONS - -template -static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) -{ - vec.insert(index, item); -} - -template -static void VmaVectorRemove(VmaVector& vec, size_t index) -{ - vec.remove(index); -} -#endif // _VMA_VECTOR - -#ifndef _VMA_SMALL_VECTOR -/* -This is a vector (a variable-sized array), optimized for the case when the array is small. - -It contains some number of elements in-place, which allows it to avoid heap allocation -when the actual number of elements is below that threshold. This allows normal "small" -cases to be fast without losing generality for large inputs. -*/ -template -class VmaSmallVector -{ -public: - typedef T value_type; - typedef T* iterator; - - VmaSmallVector(const AllocatorT& allocator); - VmaSmallVector(size_t count, const AllocatorT& allocator); - template - VmaSmallVector(const VmaSmallVector&) = delete; - template - VmaSmallVector& operator=(const VmaSmallVector&) = delete; - ~VmaSmallVector() = default; - - bool empty() const { return m_Count == 0; } - size_t size() const { return m_Count; } - T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } - const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } - - iterator begin() { return data(); } - iterator end() { return data() + m_Count; } - - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } - - void push_back(const T& src); - void resize(size_t newCount, bool freeMemory = false); - void clear(bool freeMemory = false); - void insert(size_t index, const T& src); - void remove(size_t index); - - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } - -private: - size_t m_Count; - T m_StaticArray[N]; // Used when m_Size <= N - VmaVector m_DynamicArray; // Used when m_Size > N -}; - -#ifndef _VMA_SMALL_VECTOR_FUNCTIONS -template -VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) - : m_Count(0), - m_DynamicArray(allocator) {} - -template -VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) - : m_Count(count), - m_DynamicArray(count > N ? count : 0, allocator) {} - -template -void VmaSmallVector::push_back(const T& src) -{ - const size_t newIndex = size(); - resize(newIndex + 1); - data()[newIndex] = src; -} - -template -void VmaSmallVector::resize(size_t newCount, bool freeMemory) -{ - if (newCount > N && m_Count > N) - { - // Any direction, staying in m_DynamicArray - m_DynamicArray.resize(newCount); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - } - else if (newCount > N && m_Count <= N) - { - // Growing, moving from m_StaticArray to m_DynamicArray - m_DynamicArray.resize(newCount); - if (m_Count > 0) - { - memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); - } - } - else if (newCount <= N && m_Count > N) - { - // Shrinking, moving from m_DynamicArray to m_StaticArray - if (newCount > 0) - { - memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); - } - m_DynamicArray.resize(0); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - } - else - { - // Any direction, staying in m_StaticArray - nothing to do here - } - m_Count = newCount; -} - -template -void VmaSmallVector::clear(bool freeMemory) -{ - m_DynamicArray.clear(); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - m_Count = 0; -} - -template -void VmaSmallVector::insert(size_t index, const T& src) -{ - VMA_HEAVY_ASSERT(index <= m_Count); - const size_t oldCount = size(); - resize(oldCount + 1); - T* const dataPtr = data(); - if (index < oldCount) - { - // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. - memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); - } - dataPtr[index] = src; -} - -template -void VmaSmallVector::remove(size_t index) -{ - VMA_HEAVY_ASSERT(index < m_Count); - const size_t oldCount = size(); - if (index < oldCount - 1) - { - // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
- T* const dataPtr = data(); - memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); - } - resize(oldCount - 1); -} -#endif // _VMA_SMALL_VECTOR_FUNCTIONS -#endif // _VMA_SMALL_VECTOR - -#ifndef _VMA_POOL_ALLOCATOR -/* -Allocator for objects of type T using a list of arrays (pools) to speed up -allocation. Number of elements that can be allocated is not bounded because -allocator can create multiple blocks. -*/ -template -class VmaPoolAllocator -{ - VMA_CLASS_NO_COPY(VmaPoolAllocator) -public: - VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); - ~VmaPoolAllocator(); - template T* Alloc(Types&&... args); - void Free(T* ptr); - -private: - union Item - { - uint32_t NextFreeIndex; - alignas(T) char Value[sizeof(T)]; - }; - struct ItemBlock - { - Item* pItems; - uint32_t Capacity; - uint32_t FirstFreeIndex; - }; - - const VkAllocationCallbacks* m_pAllocationCallbacks; - const uint32_t m_FirstBlockCapacity; - VmaVector> m_ItemBlocks; - - ItemBlock& CreateNewBlock(); -}; - -#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS -template -VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) - : m_pAllocationCallbacks(pAllocationCallbacks), - m_FirstBlockCapacity(firstBlockCapacity), - m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) -{ - VMA_ASSERT(m_FirstBlockCapacity > 1); -} - -template -VmaPoolAllocator::~VmaPoolAllocator() -{ - for (size_t i = m_ItemBlocks.size(); i--;) - vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); - m_ItemBlocks.clear(); -} - -template -template T* VmaPoolAllocator::Alloc(Types&&... args) -{ - for (size_t i = m_ItemBlocks.size(); i--; ) - { - ItemBlock& block = m_ItemBlocks[i]; - // This block has some free items: Use first one. - if (block.FirstFreeIndex != UINT32_MAX) - { - Item* const pItem = &block.pItems[block.FirstFreeIndex]; - block.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result)T(std::forward(args)...); // Explicit constructor call. - return result; - } - } - - // No block has free item: Create new one and use it. - ItemBlock& newBlock = CreateNewBlock(); - Item* const pItem = &newBlock.pItems[0]; - newBlock.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result) T(std::forward(args)...); // Explicit constructor call. - return result; -} - -template -void VmaPoolAllocator::Free(T* ptr) -{ - // Search all memory blocks to find ptr. - for (size_t i = m_ItemBlocks.size(); i--; ) - { - ItemBlock& block = m_ItemBlocks[i]; - - // Casting to union. - Item* pItemPtr; - memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); - - // Check if pItemPtr is in address range of this block. - if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) - { - ptr->~T(); // Explicit destructor call. - const uint32_t index = static_cast(pItemPtr - block.pItems); - pItemPtr->NextFreeIndex = block.FirstFreeIndex; - block.FirstFreeIndex = index; - return; - } - } - VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); -} - -template -typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() -{ - const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
- m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; - - const ItemBlock newBlock = - { - vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), - newBlockCapacity, - 0 - }; - - m_ItemBlocks.push_back(newBlock); - - // Setup singly-linked list of all free items in this block. - for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) - newBlock.pItems[i].NextFreeIndex = i + 1; - newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; - return m_ItemBlocks.back(); -} -#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS -#endif // _VMA_POOL_ALLOCATOR - -#ifndef _VMA_RAW_LIST -template -struct VmaListItem -{ - VmaListItem* pPrev; - VmaListItem* pNext; - T Value; -}; - -// Doubly linked list. -template -class VmaRawList -{ - VMA_CLASS_NO_COPY(VmaRawList) -public: - typedef VmaListItem ItemType; - - VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); - // Intentionally not calling Clear, because that would be unnecessary - // computations to return all items to m_ItemAllocator as free. - ~VmaRawList() = default; - - size_t GetCount() const { return m_Count; } - bool IsEmpty() const { return m_Count == 0; } - - ItemType* Front() { return m_pFront; } - ItemType* Back() { return m_pBack; } - const ItemType* Front() const { return m_pFront; } - const ItemType* Back() const { return m_pBack; } - - ItemType* PushFront(); - ItemType* PushBack(); - ItemType* PushFront(const T& value); - ItemType* PushBack(const T& value); - void PopFront(); - void PopBack(); - - // Item can be null - it means PushBack. - ItemType* InsertBefore(ItemType* pItem); - // Item can be null - it means PushFront. - ItemType* InsertAfter(ItemType* pItem); - ItemType* InsertBefore(ItemType* pItem, const T& value); - ItemType* InsertAfter(ItemType* pItem, const T& value); - - void Clear(); - void Remove(ItemType* pItem); - -private: - const VkAllocationCallbacks* const m_pAllocationCallbacks; - VmaPoolAllocator m_ItemAllocator; - ItemType* m_pFront; - ItemType* m_pBack; - size_t m_Count; -}; - -#ifndef _VMA_RAW_LIST_FUNCTIONS -template -VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) - : m_pAllocationCallbacks(pAllocationCallbacks), - m_ItemAllocator(pAllocationCallbacks, 128), - m_pFront(VMA_NULL), - m_pBack(VMA_NULL), - m_Count(0) {} - -template -VmaListItem* VmaRawList::PushFront() -{ - ItemType* const pNewItem = m_ItemAllocator.Alloc(); - pNewItem->pPrev = VMA_NULL; - if (IsEmpty()) - { - pNewItem->pNext = VMA_NULL; - m_pFront = pNewItem; - m_pBack = pNewItem; - m_Count = 1; - } - else - { - pNewItem->pNext = m_pFront; - m_pFront->pPrev = pNewItem; - m_pFront = pNewItem; - ++m_Count; - } - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushBack() -{ - ItemType* const pNewItem = m_ItemAllocator.Alloc(); - pNewItem->pNext = VMA_NULL; - if(IsEmpty()) - { - pNewItem->pPrev = VMA_NULL; - m_pFront = pNewItem; - m_pBack = pNewItem; - m_Count = 1; - } - else - { - pNewItem->pPrev = m_pBack; - m_pBack->pNext = pNewItem; - m_pBack = pNewItem; - ++m_Count; - } - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushFront(const T& value) -{ - ItemType* const pNewItem = PushFront(); - pNewItem->Value = value; - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushBack(const T& value) -{ - ItemType* const pNewItem = PushBack(); - pNewItem->Value = value; - return pNewItem; -} - -template -void VmaRawList::PopFront() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pFrontItem = m_pFront; - ItemType* const pNextItem = pFrontItem->pNext; - if (pNextItem != 
VMA_NULL) - { - pNextItem->pPrev = VMA_NULL; - } - m_pFront = pNextItem; - m_ItemAllocator.Free(pFrontItem); - --m_Count; -} - -template -void VmaRawList::PopBack() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pBackItem = m_pBack; - ItemType* const pPrevItem = pBackItem->pPrev; - if(pPrevItem != VMA_NULL) - { - pPrevItem->pNext = VMA_NULL; - } - m_pBack = pPrevItem; - m_ItemAllocator.Free(pBackItem); - --m_Count; -} - -template -void VmaRawList::Clear() -{ - if (IsEmpty() == false) - { - ItemType* pItem = m_pBack; - while (pItem != VMA_NULL) - { - ItemType* const pPrevItem = pItem->pPrev; - m_ItemAllocator.Free(pItem); - pItem = pPrevItem; - } - m_pFront = VMA_NULL; - m_pBack = VMA_NULL; - m_Count = 0; - } -} - -template -void VmaRawList::Remove(ItemType* pItem) -{ - VMA_HEAVY_ASSERT(pItem != VMA_NULL); - VMA_HEAVY_ASSERT(m_Count > 0); - - if(pItem->pPrev != VMA_NULL) - { - pItem->pPrev->pNext = pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(m_pFront == pItem); - m_pFront = pItem->pNext; - } - - if(pItem->pNext != VMA_NULL) - { - pItem->pNext->pPrev = pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(m_pBack == pItem); - m_pBack = pItem->pPrev; - } - - m_ItemAllocator.Free(pItem); - --m_Count; -} - -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) -{ - if(pItem != VMA_NULL) - { - ItemType* const prevItem = pItem->pPrev; - ItemType* const newItem = m_ItemAllocator.Alloc(); - newItem->pPrev = prevItem; - newItem->pNext = pItem; - pItem->pPrev = newItem; - if(prevItem != VMA_NULL) - { - prevItem->pNext = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_pFront == pItem); - m_pFront = newItem; - } - ++m_Count; - return newItem; - } - else - return PushBack(); -} - -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) -{ - if(pItem != VMA_NULL) - { - ItemType* const nextItem = pItem->pNext; - ItemType* const newItem = m_ItemAllocator.Alloc(); - newItem->pNext = nextItem; - newItem->pPrev = pItem; - pItem->pNext = newItem; - if(nextItem != VMA_NULL) - { - nextItem->pPrev = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_pBack == pItem); - m_pBack = newItem; - } - ++m_Count; - return newItem; - } - else - return PushFront(); -} - -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) -{ - ItemType* const newItem = InsertBefore(pItem); - newItem->Value = value; - return newItem; -} - -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) -{ - ItemType* const newItem = InsertAfter(pItem); - newItem->Value = value; - return newItem; -} -#endif // _VMA_RAW_LIST_FUNCTIONS -#endif // _VMA_RAW_LIST - -#ifndef _VMA_LIST -template -class VmaList -{ - VMA_CLASS_NO_COPY(VmaList) -public: - class reverse_iterator; - class const_iterator; - class const_reverse_iterator; - - class iterator - { - friend class const_iterator; - friend class VmaList; - public: - iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - iterator operator++(int) { iterator result = *this; ++*this; return result; } - iterator operator--(int) { 
iterator result = *this; --*this; return result; } - - iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - iterator& operator--(); - - private: - VmaRawList* m_pList; - VmaListItem* m_pItem; - - iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class reverse_iterator - { - friend class const_reverse_iterator; - friend class VmaList; - public: - reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } - reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } - - reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - reverse_iterator& operator--(); - - private: - VmaRawList* m_pList; - VmaListItem* m_pItem; - - reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class const_iterator - { - friend class VmaList; - public: - const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } - - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } - const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } - - const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - const_iterator& operator--(); - - private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; - - const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class const_reverse_iterator - { - friend class VmaList; - public: - const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } - - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const 
const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } - const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } - - const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - const_reverse_iterator& operator--(); - - private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; - - const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - - VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} - - bool empty() const { return m_RawList.IsEmpty(); } - size_t size() const { return m_RawList.GetCount(); } - - iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } - iterator end() { return iterator(&m_RawList, VMA_NULL); } - - const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } - const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } - - const_iterator begin() const { return cbegin(); } - const_iterator end() const { return cend(); } - - reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } - reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } - - const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } - const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } - - const_reverse_iterator rbegin() const { return crbegin(); } - const_reverse_iterator rend() const { return crend(); } - - void push_back(const T& value) { m_RawList.PushBack(value); } - iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } - - void clear() { m_RawList.Clear(); } - void erase(iterator it) { m_RawList.Remove(it.m_pItem); } - -private: - VmaRawList m_RawList; -}; - -#ifndef _VMA_LIST_FUNCTIONS -template -typename VmaList::iterator& VmaList::iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} - -template -typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Front(); - } - return *this; -} - -template -typename VmaList::const_iterator& VmaList::const_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} - -template -typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} -#endif // _VMA_LIST_FUNCTIONS -#endif // _VMA_LIST - -#ifndef _VMA_INTRUSIVE_LINKED_LIST -/* -Expected interface of ItemTypeTraits: -struct MyItemTypeTraits -{ - typedef MyItem ItemType; - static ItemType* GetPrev(const ItemType* item) { return 
item->myPrevPtr; } - static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } - static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } - static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } -}; -*/ -template -class VmaIntrusiveLinkedList -{ -public: - typedef typename ItemTypeTraits::ItemType ItemType; - static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } - static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } - - // Movable, not copyable. - VmaIntrusiveLinkedList() = default; - VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); - VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; - VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); - VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; - ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } - - size_t GetCount() const { return m_Count; } - bool IsEmpty() const { return m_Count == 0; } - ItemType* Front() { return m_Front; } - ItemType* Back() { return m_Back; } - const ItemType* Front() const { return m_Front; } - const ItemType* Back() const { return m_Back; } - - void PushBack(ItemType* item); - void PushFront(ItemType* item); - ItemType* PopBack(); - ItemType* PopFront(); - - // MyItem can be null - it means PushBack. - void InsertBefore(ItemType* existingItem, ItemType* newItem); - // MyItem can be null - it means PushFront. - void InsertAfter(ItemType* existingItem, ItemType* newItem); - void Remove(ItemType* item); - void RemoveAll(); - -private: - ItemType* m_Front = VMA_NULL; - ItemType* m_Back = VMA_NULL; - size_t m_Count = 0; -}; - -#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS -template -VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) - : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) -{ - src.m_Front = src.m_Back = VMA_NULL; - src.m_Count = 0; -} - -template -VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) -{ - if (&src != this) - { - VMA_HEAVY_ASSERT(IsEmpty()); - m_Front = src.m_Front; - m_Back = src.m_Back; - m_Count = src.m_Count; - src.m_Front = src.m_Back = VMA_NULL; - src.m_Count = 0; - } - return *this; -} - -template -void VmaIntrusiveLinkedList::PushBack(ItemType* item) -{ - VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); - if (IsEmpty()) - { - m_Front = item; - m_Back = item; - m_Count = 1; - } - else - { - ItemTypeTraits::AccessPrev(item) = m_Back; - ItemTypeTraits::AccessNext(m_Back) = item; - m_Back = item; - ++m_Count; - } -} - -template -void VmaIntrusiveLinkedList::PushFront(ItemType* item) -{ - VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); - if (IsEmpty()) - { - m_Front = item; - m_Back = item; - m_Count = 1; - } - else - { - ItemTypeTraits::AccessNext(item) = m_Front; - ItemTypeTraits::AccessPrev(m_Front) = item; - m_Front = item; - ++m_Count; - } -} - -template -typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const backItem = m_Back; - ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); - if (prevItem != VMA_NULL) - { - ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; - } - m_Back = prevItem; - --m_Count; - ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; - ItemTypeTraits::AccessNext(backItem) = VMA_NULL; - return backItem; -} - -template 
-typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const frontItem = m_Front; - ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); - if (nextItem != VMA_NULL) - { - ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; - } - m_Front = nextItem; - --m_Count; - ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; - ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; - return frontItem; -} - -template -void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) -{ - VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); - if (existingItem != VMA_NULL) - { - ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); - ItemTypeTraits::AccessPrev(newItem) = prevItem; - ItemTypeTraits::AccessNext(newItem) = existingItem; - ItemTypeTraits::AccessPrev(existingItem) = newItem; - if (prevItem != VMA_NULL) - { - ItemTypeTraits::AccessNext(prevItem) = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_Front == existingItem); - m_Front = newItem; - } - ++m_Count; - } - else - PushBack(newItem); -} - -template -void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) -{ - VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); - if (existingItem != VMA_NULL) - { - ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); - ItemTypeTraits::AccessNext(newItem) = nextItem; - ItemTypeTraits::AccessPrev(newItem) = existingItem; - ItemTypeTraits::AccessNext(existingItem) = newItem; - if (nextItem != VMA_NULL) - { - ItemTypeTraits::AccessPrev(nextItem) = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_Back == existingItem); - m_Back = newItem; - } - ++m_Count; - } - else - return PushFront(newItem); -} - -template -void VmaIntrusiveLinkedList::Remove(ItemType* item) -{ - VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); - if (ItemTypeTraits::GetPrev(item) != VMA_NULL) - { - ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); - } - else - { - VMA_HEAVY_ASSERT(m_Front == item); - m_Front = ItemTypeTraits::GetNext(item); - } - - if (ItemTypeTraits::GetNext(item) != VMA_NULL) - { - ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); - } - else - { - VMA_HEAVY_ASSERT(m_Back == item); - m_Back = ItemTypeTraits::GetPrev(item); - } - ItemTypeTraits::AccessPrev(item) = VMA_NULL; - ItemTypeTraits::AccessNext(item) = VMA_NULL; - --m_Count; -} - -template -void VmaIntrusiveLinkedList::RemoveAll() -{ - if (!IsEmpty()) - { - ItemType* item = m_Back; - while (item != VMA_NULL) - { - ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); - ItemTypeTraits::AccessPrev(item) = VMA_NULL; - ItemTypeTraits::AccessNext(item) = VMA_NULL; - item = prevItem; - } - m_Front = VMA_NULL; - m_Back = VMA_NULL; - m_Count = 0; - } -} -#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS -#endif // _VMA_INTRUSIVE_LINKED_LIST - -// Unused in this version. 
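[Editor's note: the ItemTypeTraits contract documented before VmaIntrusiveLinkedList is satisfied by any node type that exposes prev/next accessors. A minimal sketch (Node and NodeTraits are illustrative names, not from this file):

struct Node
{
    int value = 0;
    Node* pPrev = nullptr;
    Node* pNext = nullptr;
};

struct NodeTraits
{
    typedef Node ItemType;
    static Node* GetPrev(const Node* item) { return item->pPrev; }
    static Node* GetNext(const Node* item) { return item->pNext; }
    static Node*& AccessPrev(Node* item) { return item->pPrev; }
    static Node*& AccessNext(Node* item) { return item->pNext; }
};

// Usage: the list never owns the nodes, it only links them together.
//     VmaIntrusiveLinkedList<NodeTraits> list;
//     Node a, b;
//     list.PushBack(&a);
//     list.PushBack(&b);   // a <-> b
//     list.RemoveAll();    // unlinks both; a and b themselves are untouched
]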
-#if 0 - -#ifndef _VMA_PAIR -template -struct VmaPair -{ - T1 first; - T2 second; - - VmaPair() : first(), second() {} - VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {} -}; - -template -struct VmaPairFirstLess -{ - bool operator()(const VmaPair& lhs, const VmaPair& rhs) const - { - return lhs.first < rhs.first; - } - bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const - { - return lhs.first < rhsFirst; - } -}; -#endif // _VMA_PAIR - -#ifndef _VMA_MAP -/* Class compatible with subset of interface of std::unordered_map. -KeyT, ValueT must be POD because they will be stored in VmaVector. -*/ -template -class VmaMap -{ -public: - typedef VmaPair PairType; - typedef PairType* iterator; - - VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) {} - - iterator begin() { return m_Vector.begin(); } - iterator end() { return m_Vector.end(); } - size_t size() { return m_Vector.size(); } - - void insert(const PairType& pair); - iterator find(const KeyT& key); - void erase(iterator it); - -private: - VmaVector< PairType, VmaStlAllocator> m_Vector; -}; - -#ifndef _VMA_MAP_FUNCTIONS -template -void VmaMap::insert(const PairType& pair) -{ - const size_t indexToInsert = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - pair, - VmaPairFirstLess()) - m_Vector.data(); - VmaVectorInsert(m_Vector, indexToInsert, pair); -} - -template -VmaPair* VmaMap::find(const KeyT& key) -{ - PairType* it = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - key, - VmaPairFirstLess()); - if ((it != m_Vector.end()) && (it->first == key)) - { - return it; - } - else - { - return m_Vector.end(); - } -} - -template -void VmaMap::erase(iterator it) -{ - VmaVectorRemove(m_Vector, it - m_Vector.begin()); -} -#endif // _VMA_MAP_FUNCTIONS -#endif // _VMA_MAP - -#endif // #if 0 - -#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED -class VmaStringBuilder -{ -public: - VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} - ~VmaStringBuilder() = default; - - size_t GetLength() const { return m_Data.size(); } - const char* GetData() const { return m_Data.data(); } - void AddNewLine() { Add('\n'); } - void Add(char ch) { m_Data.push_back(ch); } - - void Add(const char* pStr); - void AddNumber(uint32_t num); - void AddNumber(uint64_t num); - void AddPointer(const void* ptr); - -private: - VmaVector> m_Data; -}; - -#ifndef _VMA_STRING_BUILDER_FUNCTIONS -void VmaStringBuilder::Add(const char* pStr) -{ - const size_t strLen = strlen(pStr); - if (strLen > 0) - { - const size_t oldCount = m_Data.size(); - m_Data.resize(oldCount + strLen); - memcpy(m_Data.data() + oldCount, pStr, strLen); - } -} - -void VmaStringBuilder::AddNumber(uint32_t num) -{ - char buf[11]; - buf[10] = '\0'; - char* p = &buf[10]; - do - { - *--p = '0' + (num % 10); - num /= 10; - } while (num); - Add(p); -} - -void VmaStringBuilder::AddNumber(uint64_t num) -{ - char buf[21]; - buf[20] = '\0'; - char* p = &buf[20]; - do - { - *--p = '0' + (num % 10); - num /= 10; - } while (num); - Add(p); -} - -void VmaStringBuilder::AddPointer(const void* ptr) -{ - char buf[21]; - VmaPtrToStr(buf, sizeof(buf), ptr); - Add(buf); -} -#endif //_VMA_STRING_BUILDER_FUNCTIONS -#endif // _VMA_STRING_BUILDER - -#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED -/* -Allows to conveniently build a correct JSON document to be written to the -VmaStringBuilder passed to the constructor. 
-*/
-class VmaJsonWriter
-{
-    VMA_CLASS_NO_COPY(VmaJsonWriter)
-public:
-    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
-    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
-    ~VmaJsonWriter();
-
-    // Begins object by writing "{".
-    // Inside an object, you must call pairs of WriteString and a value, e.g.:
-    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
-    // Will write: { "A": 1, "B": 2 }
-    void BeginObject(bool singleLine = false);
-    // Ends object by writing "}".
-    void EndObject();
-
-    // Begins array by writing "[".
-    // Inside an array, you can write a sequence of any values.
-    void BeginArray(bool singleLine = false);
-    // Ends array by writing "]".
-    void EndArray();
-
-    // Writes a string value inside "".
-    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
-    void WriteString(const char* pStr);
-
-    // Begins writing a string value.
-    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
-    // WriteString to conveniently build the string content incrementally, made of
-    // parts including numbers.
-    void BeginString(const char* pStr = VMA_NULL);
-    // Posts next part of an open string.
-    void ContinueString(const char* pStr);
-    // Posts next part of an open string. The number is converted to decimal characters.
-    void ContinueString(uint32_t n);
-    void ContinueString(uint64_t n);
-    void ContinueString_Size(size_t n);
-    // Posts next part of an open string. Pointer value is converted to characters
-    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
-    void ContinueString_Pointer(const void* ptr);
-    // Ends writing a string value by writing '"'.
-    void EndString(const char* pStr = VMA_NULL);
-
-    // Writes a number value.
-    void WriteNumber(uint32_t n);
-    void WriteNumber(uint64_t n);
-    void WriteSize(size_t n);
-    // Writes a boolean value - false or true.
-    void WriteBool(bool b);
-    // Writes a null value.
- void WriteNull(); - -private: - enum COLLECTION_TYPE - { - COLLECTION_TYPE_OBJECT, - COLLECTION_TYPE_ARRAY, - }; - struct StackItem - { - COLLECTION_TYPE type; - uint32_t valueCount; - bool singleLineMode; - }; - - static const char* const INDENT; - - VmaStringBuilder& m_SB; - VmaVector< StackItem, VmaStlAllocator > m_Stack; - bool m_InsideString; - - // Write size_t for less than 64bits - void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } - // Write size_t for 64bits - void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } - - void BeginValue(bool isString); - void WriteIndent(bool oneLess = false); -}; -const char* const VmaJsonWriter::INDENT = " "; - -#ifndef _VMA_JSON_WRITER_FUNCTIONS -VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) - : m_SB(sb), - m_Stack(VmaStlAllocator(pAllocationCallbacks)), - m_InsideString(false) {} - -VmaJsonWriter::~VmaJsonWriter() -{ - VMA_ASSERT(!m_InsideString); - VMA_ASSERT(m_Stack.empty()); -} - -void VmaJsonWriter::BeginObject(bool singleLine) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(false); - m_SB.Add('{'); - - StackItem item; - item.type = COLLECTION_TYPE_OBJECT; - item.valueCount = 0; - item.singleLineMode = singleLine; - m_Stack.push_back(item); -} - -void VmaJsonWriter::EndObject() -{ - VMA_ASSERT(!m_InsideString); - - WriteIndent(true); - m_SB.Add('}'); - - VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); - m_Stack.pop_back(); -} - -void VmaJsonWriter::BeginArray(bool singleLine) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(false); - m_SB.Add('['); - - StackItem item; - item.type = COLLECTION_TYPE_ARRAY; - item.valueCount = 0; - item.singleLineMode = singleLine; - m_Stack.push_back(item); -} - -void VmaJsonWriter::EndArray() -{ - VMA_ASSERT(!m_InsideString); - - WriteIndent(true); - m_SB.Add(']'); - - VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); - m_Stack.pop_back(); -} - -void VmaJsonWriter::WriteString(const char* pStr) -{ - BeginString(pStr); - EndString(); -} - -void VmaJsonWriter::BeginString(const char* pStr) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(true); - m_SB.Add('"'); - m_InsideString = true; - if (pStr != VMA_NULL && pStr[0] != '\0') - { - ContinueString(pStr); - } -} - -void VmaJsonWriter::ContinueString(const char* pStr) -{ - VMA_ASSERT(m_InsideString); - - const size_t strLen = strlen(pStr); - for (size_t i = 0; i < strLen; ++i) - { - char ch = pStr[i]; - if (ch == '\\') - { - m_SB.Add("\\\\"); - } - else if (ch == '"') - { - m_SB.Add("\\\""); - } - else if (ch >= 32) - { - m_SB.Add(ch); - } - else switch (ch) - { - case '\b': - m_SB.Add("\\b"); - break; - case '\f': - m_SB.Add("\\f"); - break; - case '\n': - m_SB.Add("\\n"); - break; - case '\r': - m_SB.Add("\\r"); - break; - case '\t': - m_SB.Add("\\t"); - break; - default: - VMA_ASSERT(0 && "Character not currently supported."); - break; - } - } -} - -void VmaJsonWriter::ContinueString(uint32_t n) -{ - VMA_ASSERT(m_InsideString); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::ContinueString(uint64_t n) -{ - VMA_ASSERT(m_InsideString); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::ContinueString_Size(size_t n) -{ - VMA_ASSERT(m_InsideString); - // Fix for AppleClang incorrect type casting - // TODO: Change to if constexpr when C++17 used as minimal standard - WriteSize(n, std::is_same{}); -} - -void VmaJsonWriter::ContinueString_Pointer(const void* ptr) -{ - 
VMA_ASSERT(m_InsideString); - m_SB.AddPointer(ptr); -} - -void VmaJsonWriter::EndString(const char* pStr) -{ - VMA_ASSERT(m_InsideString); - if (pStr != VMA_NULL && pStr[0] != '\0') - { - ContinueString(pStr); - } - m_SB.Add('"'); - m_InsideString = false; -} - -void VmaJsonWriter::WriteNumber(uint32_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::WriteNumber(uint64_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::WriteSize(size_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - // Fix for AppleClang incorrect type casting - // TODO: Change to if constexpr when C++17 used as minimal standard - WriteSize(n, std::is_same{}); -} - -void VmaJsonWriter::WriteBool(bool b) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.Add(b ? "true" : "false"); -} - -void VmaJsonWriter::WriteNull() -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.Add("null"); -} - -void VmaJsonWriter::BeginValue(bool isString) -{ - if (!m_Stack.empty()) - { - StackItem& currItem = m_Stack.back(); - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 == 0) - { - VMA_ASSERT(isString); - } - - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 != 0) - { - m_SB.Add(": "); - } - else if (currItem.valueCount > 0) - { - m_SB.Add(", "); - WriteIndent(); - } - else - { - WriteIndent(); - } - ++currItem.valueCount; - } -} - -void VmaJsonWriter::WriteIndent(bool oneLess) -{ - if (!m_Stack.empty() && !m_Stack.back().singleLineMode) - { - m_SB.AddNewLine(); - - size_t count = m_Stack.size(); - if (count > 0 && oneLess) - { - --count; - } - for (size_t i = 0; i < count; ++i) - { - m_SB.Add(INDENT); - } - } -} -#endif // _VMA_JSON_WRITER_FUNCTIONS - -static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) -{ - json.BeginObject(); - - json.WriteString("BlockCount"); - json.WriteNumber(stat.statistics.blockCount); - json.WriteString("BlockBytes"); - json.WriteNumber(stat.statistics.blockBytes); - json.WriteString("AllocationCount"); - json.WriteNumber(stat.statistics.allocationCount); - json.WriteString("AllocationBytes"); - json.WriteNumber(stat.statistics.allocationBytes); - json.WriteString("UnusedRangeCount"); - json.WriteNumber(stat.unusedRangeCount); - - if (stat.statistics.allocationCount > 1) - { - json.WriteString("AllocationSizeMin"); - json.WriteNumber(stat.allocationSizeMin); - json.WriteString("AllocationSizeMax"); - json.WriteNumber(stat.allocationSizeMax); - } - if (stat.unusedRangeCount > 1) - { - json.WriteString("UnusedRangeSizeMin"); - json.WriteNumber(stat.unusedRangeSizeMin); - json.WriteString("UnusedRangeSizeMax"); - json.WriteNumber(stat.unusedRangeSizeMax); - } - json.EndObject(); -} -#endif // _VMA_JSON_WRITER - -#ifndef _VMA_MAPPING_HYSTERESIS - -class VmaMappingHysteresis -{ - VMA_CLASS_NO_COPY(VmaMappingHysteresis) -public: - VmaMappingHysteresis() = default; - - uint32_t GetExtraMapping() const { return m_ExtraMapping; } - - // Call when Map was called. - // Returns true if switched to extra +1 mapping reference count. 
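-    // Example: with COUNTER_MIN_EXTRA_MAPPING == 7, roughly seven Map calls with
-    // few allocations in between promote the block to holding one extra mapping
-    // reference, so subsequent Map/Unmap pairs stop hitting vkMapMemory/vkUnmapMemory.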
- bool PostMap() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) - { - ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) - { - m_ExtraMapping = 1; - m_MajorCounter = 0; - m_MinorCounter = 0; - return true; - } - } - else // m_ExtraMapping == 1 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - return false; - } - - // Call when Unmap was called. - void PostUnmap() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) - ++m_MajorCounter; - else // m_ExtraMapping == 1 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - } - - // Call when allocation was made from the memory block. - void PostAlloc() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) - ++m_MajorCounter; - else // m_ExtraMapping == 0 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - } - - // Call when allocation was freed from the memory block. - // Returns true if switched to extra -1 mapping reference count. - bool PostFree() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) - { - ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && - m_MajorCounter > m_MinorCounter + 1) - { - m_ExtraMapping = 0; - m_MajorCounter = 0; - m_MinorCounter = 0; - return true; - } - } - else // m_ExtraMapping == 0 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - return false; - } - -private: - static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; - - uint32_t m_MinorCounter = 0; - uint32_t m_MajorCounter = 0; - uint32_t m_ExtraMapping = 0; // 0 or 1. - - void PostMinorCounter() - { - if(m_MinorCounter < m_MajorCounter) - { - ++m_MinorCounter; - } - else if(m_MajorCounter > 0) - { - --m_MajorCounter; - --m_MinorCounter; - } - } -}; - -#endif // _VMA_MAPPING_HYSTERESIS - -#ifndef _VMA_DEVICE_MEMORY_BLOCK -/* -Represents a single block of device memory (`VkDeviceMemory`) with all the -data about its regions (aka suballocations, #VmaAllocation), assigned and free. - -Thread-safety: -- Access to m_pMetadata must be externally synchronized. -- Map, Unmap, Bind* are synchronized internally. -*/ -class VmaDeviceMemoryBlock -{ - VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) -public: - VmaBlockMetadata* m_pMetadata; - - VmaDeviceMemoryBlock(VmaAllocator hAllocator); - ~VmaDeviceMemoryBlock(); - - // Always call after construction. - void Init( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t newMemoryTypeIndex, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - uint32_t id, - uint32_t algorithm, - VkDeviceSize bufferImageGranularity); - // Always call before destruction. - void Destroy(VmaAllocator allocator); - - VmaPool GetParentPool() const { return m_hParentPool; } - VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } - uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - uint32_t GetId() const { return m_Id; } - void* GetMappedData() const { return m_pMappedData; } - uint32_t GetMapRefCount() const { return m_MapCount; } - - // Call when allocation/free was made from m_pMetadata. - // Used for m_MappingHysteresis. - void PostAlloc() { m_MappingHysteresis.PostAlloc(); } - void PostFree(VmaAllocator hAllocator); - - // Validates all data structures inside this object. If not valid, returns false. - bool Validate() const; - VkResult CheckCorruption(VmaAllocator hAllocator); - - // ppData can be null. 
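-    // Mapping is reference-counted: each Map adds 'count' references and each Unmap
-    // releases 'count'; vkUnmapMemory is only called once the count drops to zero.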
- VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); - void Unmap(VmaAllocator hAllocator, uint32_t count); - - VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); - VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); - - VkResult BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext); - VkResult BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext); - -private: - VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. - uint32_t m_MemoryTypeIndex; - uint32_t m_Id; - VkDeviceMemory m_hMemory; - - /* - Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. - Also protects m_MapCount, m_pMappedData. - Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. - */ - VMA_MUTEX m_MapAndBindMutex; - VmaMappingHysteresis m_MappingHysteresis; - uint32_t m_MapCount; - void* m_pMappedData; -}; -#endif // _VMA_DEVICE_MEMORY_BLOCK - -#ifndef _VMA_ALLOCATION_T -struct VmaAllocation_T -{ - friend struct VmaDedicatedAllocationListItemTraits; - - enum FLAGS - { - FLAG_PERSISTENT_MAP = 0x01, - FLAG_MAPPING_ALLOWED = 0x02, - }; - -public: - enum ALLOCATION_TYPE - { - ALLOCATION_TYPE_NONE, - ALLOCATION_TYPE_BLOCK, - ALLOCATION_TYPE_DEDICATED, - }; - - // This struct is allocated using VmaPoolAllocator. - VmaAllocation_T(bool mappingAllowed); - ~VmaAllocation_T(); - - void InitBlockAllocation( - VmaDeviceMemoryBlock* block, - VmaAllocHandle allocHandle, - VkDeviceSize alignment, - VkDeviceSize size, - uint32_t memoryTypeIndex, - VmaSuballocationType suballocationType, - bool mapped); - // pMappedData not null means allocation is created with MAPPED flag. 
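-    // Used when the allocation owns a whole VkDeviceMemory instead of a sub-range
-    // of a VmaDeviceMemoryBlock.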
- void InitDedicatedAllocation( - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceMemory hMemory, - VmaSuballocationType suballocationType, - void* pMappedData, - VkDeviceSize size); - - ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } - VkDeviceSize GetAlignment() const { return m_Alignment; } - VkDeviceSize GetSize() const { return m_Size; } - void* GetUserData() const { return m_pUserData; } - const char* GetName() const { return m_pName; } - VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } - - VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } - uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } - bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } - - void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } - void SetName(VmaAllocator hAllocator, const char* pName); - void FreeName(VmaAllocator hAllocator); - uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); - VmaAllocHandle GetAllocHandle() const; - VkDeviceSize GetOffset() const; - VmaPool GetParentPool() const; - VkDeviceMemory GetMemory() const; - void* GetMappedData() const; - - void BlockAllocMap(); - void BlockAllocUnmap(); - VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); - void DedicatedAllocUnmap(VmaAllocator hAllocator); - -#if VMA_STATS_STRING_ENABLED - uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } - - void InitBufferImageUsage(uint32_t bufferImageUsage); - void PrintParameters(class VmaJsonWriter& json) const; -#endif - -private: - // Allocation out of VmaDeviceMemoryBlock. - struct BlockAllocation - { - VmaDeviceMemoryBlock* m_Block; - VmaAllocHandle m_AllocHandle; - }; - // Allocation for an object that has its own private VkDeviceMemory. - struct DedicatedAllocation - { - VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. - VkDeviceMemory m_hMemory; - void* m_pMappedData; // Not null means memory is mapped. - VmaAllocation_T* m_Prev; - VmaAllocation_T* m_Next; - }; - union - { - // Allocation out of VmaDeviceMemoryBlock. - BlockAllocation m_BlockAllocation; - // Allocation for an object that has its own private VkDeviceMemory. - DedicatedAllocation m_DedicatedAllocation; - }; - - VkDeviceSize m_Alignment; - VkDeviceSize m_Size; - void* m_pUserData; - char* m_pName; - uint32_t m_MemoryTypeIndex; - uint8_t m_Type; // ALLOCATION_TYPE - uint8_t m_SuballocationType; // VmaSuballocationType - // Reference counter for vmaMapMemory()/vmaUnmapMemory(). - uint8_t m_MapCount; - uint8_t m_Flags; // enum FLAGS -#if VMA_STATS_STRING_ENABLED - uint32_t m_BufferImageUsage; // 0 if unknown. 
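-    // Recorded so JSON statistics dumps can report the original buffer/image usage flags.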
-#endif -}; -#endif // _VMA_ALLOCATION_T - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS -struct VmaDedicatedAllocationListItemTraits -{ - typedef VmaAllocation_T ItemType; - - static ItemType* GetPrev(const ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Prev; - } - static ItemType* GetNext(const ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Next; - } - static ItemType*& AccessPrev(ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Prev; - } - static ItemType*& AccessNext(ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Next; - } -}; -#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST -/* -Stores linked list of VmaAllocation_T objects. -Thread-safe, synchronized internally. -*/ -class VmaDedicatedAllocationList -{ -public: - VmaDedicatedAllocationList() {} - ~VmaDedicatedAllocationList(); - - void Init(bool useMutex) { m_UseMutex = useMutex; } - bool Validate(); - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); - void AddStatistics(VmaStatistics& inoutStats); -#if VMA_STATS_STRING_ENABLED - // Writes JSON array with the list of allocations. - void BuildStatsString(VmaJsonWriter& json); -#endif - - bool IsEmpty(); - void Register(VmaAllocation alloc); - void Unregister(VmaAllocation alloc); - -private: - typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; - - bool m_UseMutex = true; - VMA_RW_MUTEX m_Mutex; - DedicatedAllocationLinkedList m_AllocationList; -}; - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS - -VmaDedicatedAllocationList::~VmaDedicatedAllocationList() -{ - VMA_HEAVY_ASSERT(Validate()); - - if (!m_AllocationList.IsEmpty()) - { - VMA_ASSERT(false && "Unfreed dedicated allocations found!"); - } -} - -bool VmaDedicatedAllocationList::Validate() -{ - const size_t declaredCount = m_AllocationList.GetCount(); - size_t actualCount = 0; - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) - { - ++actualCount; - } - VMA_VALIDATE(actualCount == declaredCount); - - return true; -} - -void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) -{ - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) - { - const VkDeviceSize size = item->GetSize(); - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += size; - VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); - } -} - -void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) -{ - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - - const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); - inoutStats.blockCount += allocCount; - inoutStats.allocationCount += allocCount; - - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) - { - const VkDeviceSize size = item->GetSize(); - inoutStats.blockBytes += size; - inoutStats.allocationBytes += size; - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) -{ - VmaMutexLockRead 
lock(m_Mutex, m_UseMutex); - json.BeginArray(); - for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) - { - json.BeginObject(true); - alloc->PrintParameters(json); - json.EndObject(); - } - json.EndArray(); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaDedicatedAllocationList::IsEmpty() -{ - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - return m_AllocationList.IsEmpty(); -} - -void VmaDedicatedAllocationList::Register(VmaAllocation alloc) -{ - VmaMutexLockWrite lock(m_Mutex, m_UseMutex); - m_AllocationList.PushBack(alloc); -} - -void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) -{ - VmaMutexLockWrite lock(m_Mutex, m_UseMutex); - m_AllocationList.Remove(alloc); -} -#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS -#endif // _VMA_DEDICATED_ALLOCATION_LIST - -#ifndef _VMA_SUBALLOCATION -/* -Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as -allocated memory block or free. -*/ -struct VmaSuballocation -{ - VkDeviceSize offset; - VkDeviceSize size; - void* userData; - VmaSuballocationType type; -}; - -// Comparator for offsets. -struct VmaSuballocationOffsetLess -{ - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const - { - return lhs.offset < rhs.offset; - } -}; - -struct VmaSuballocationOffsetGreater -{ - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const - { - return lhs.offset > rhs.offset; - } -}; - -struct VmaSuballocationItemSizeLess -{ - bool operator()(const VmaSuballocationList::iterator lhs, - const VmaSuballocationList::iterator rhs) const - { - return lhs->size < rhs->size; - } - - bool operator()(const VmaSuballocationList::iterator lhs, - VkDeviceSize rhsSize) const - { - return lhs->size < rhsSize; - } -}; -#endif // _VMA_SUBALLOCATION - -#ifndef _VMA_ALLOCATION_REQUEST -/* -Parameters of planned allocation inside a VmaDeviceMemoryBlock. -item points to a FREE suballocation. -*/ -struct VmaAllocationRequest -{ - VmaAllocHandle allocHandle; - VkDeviceSize size; - VmaSuballocationList::iterator item; - void* customData; - uint64_t algorithmData; - VmaAllocationRequestType type; -}; -#endif // _VMA_ALLOCATION_REQUEST - -#ifndef _VMA_BLOCK_METADATA -/* -Data structure used for bookkeeping of allocations and unused ranges of memory -in a single VkDeviceMemory block. -*/ -class VmaBlockMetadata -{ -public: - // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. - VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata() = default; - - virtual void Init(VkDeviceSize size) { m_Size = size; } - bool IsVirtual() const { return m_IsVirtual; } - VkDeviceSize GetSize() const { return m_Size; } - - // Validates all data structures inside this object. If not valid, returns false. - virtual bool Validate() const = 0; - virtual size_t GetAllocationCount() const = 0; - virtual size_t GetFreeRegionsCount() const = 0; - virtual VkDeviceSize GetSumFreeSize() const = 0; - // Returns true if this block is empty - contains only single free suballocation. 
- virtual bool IsEmpty() const = 0; - virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; - virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; - virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; - - virtual VmaAllocHandle GetAllocationListBegin() const = 0; - virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; - virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; - - // Shouldn't modify blockCount. - virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; - virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; - -#if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; -#endif - - // Tries to find a place for suballocation with given parameters inside this block. - // If succeeded, fills pAllocationRequest and returns true. - // If failed, returns false. - virtual bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) = 0; - - virtual VkResult CheckCorruption(const void* pBlockData) = 0; - - // Makes actual allocation based on request. Request must already be checked and valid. - virtual void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) = 0; - - // Frees suballocation assigned to given memory region. - virtual void Free(VmaAllocHandle allocHandle) = 0; - - // Frees all allocations. - // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! - virtual void Clear() = 0; - - virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; - virtual void DebugLogAllAllocations() const = 0; - -protected: - const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } - VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } - VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; } - - void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; -#if VMA_STATS_STRING_ENABLED - // mapRefCount == UINT32_MAX means unspecified. 
-    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
-        VkDeviceSize unusedBytes,
-        size_t allocationCount,
-        size_t unusedRangeCount) const;
-    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
-    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-        VkDeviceSize offset,
-        VkDeviceSize size) const;
-    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
-#endif
-
-private:
-    VkDeviceSize m_Size;
-    const VkAllocationCallbacks* m_pAllocationCallbacks;
-    const VkDeviceSize m_BufferImageGranularity;
-    const bool m_IsVirtual;
-};
-
-#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
-VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
-    VkDeviceSize bufferImageGranularity, bool isVirtual)
-    : m_Size(0),
-    m_pAllocationCallbacks(pAllocationCallbacks),
-    m_BufferImageGranularity(bufferImageGranularity),
-    m_IsVirtual(isVirtual) {}
-
-void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
-{
-    if (IsVirtual())
-    {
-        VMA_DEBUG_LOG("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
-    }
-    else
-    {
-        VMA_ASSERT(userData != VMA_NULL);
-        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
-
-        userData = allocation->GetUserData();
-        const char* name = allocation->GetName();
-
-#if VMA_STATS_STRING_ENABLED
-        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
-            offset, size, userData, name ? name : "vma_empty",
-            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
-            allocation->GetBufferImageUsage());
-#else
-        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
-            offset, size, userData, name ?
name : "vma_empty", - (uint32_t)allocation->GetSuballocationType()); -#endif // VMA_STATS_STRING_ENABLED - } - -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const -{ - json.WriteString("TotalBytes"); - json.WriteNumber(GetSize()); - - json.WriteString("UnusedBytes"); - json.WriteSize(unusedBytes); - - json.WriteString("Allocations"); - json.WriteSize(allocationCount); - - json.WriteString("UnusedRanges"); - json.WriteSize(unusedRangeCount); - - json.WriteString("Suballocations"); - json.BeginArray(); -} - -void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size, void* userData) const -{ - json.BeginObject(true); - - json.WriteString("Offset"); - json.WriteNumber(offset); - - if (IsVirtual()) - { - json.WriteString("Size"); - json.WriteNumber(size); - if (userData) - { - json.WriteString("CustomData"); - json.BeginString(); - json.ContinueString_Pointer(userData); - json.EndString(); - } - } - else - { - ((VmaAllocation)userData)->PrintParameters(json); - } - - json.EndObject(); -} - -void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size) const -{ - json.BeginObject(true); - - json.WriteString("Offset"); - json.WriteNumber(offset); - - json.WriteString("Type"); - json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); - - json.WriteString("Size"); - json.WriteNumber(size); - - json.EndObject(); -} - -void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const -{ - json.EndArray(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_BLOCK_METADATA_FUNCTIONS -#endif // _VMA_BLOCK_METADATA - -#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY -// Before deleting object of this class remember to call 'Destroy()' -class VmaBlockBufferImageGranularity final -{ -public: - struct ValidationContext - { - const VkAllocationCallbacks* allocCallbacks; - uint16_t* pageAllocs; - }; - - VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); - ~VmaBlockBufferImageGranularity(); - - bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } - - void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); - // Before destroying object you must call free it's memory - void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); - - void RoundupAllocRequest(VmaSuballocationType allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const; - - bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const; - - void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); - void FreePages(VkDeviceSize offset, VkDeviceSize size); - void Clear(); - - ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, - bool isVirutal) const; - bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; - bool FinishValidation(ValidationContext& ctx) const; - -private: - static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; - - struct RegionInfo - { - uint8_t allocType; - uint16_t allocCount; - }; - - VkDeviceSize m_BufferImageGranularity; - uint32_t m_RegionCount; - RegionInfo* m_RegionInfo; - - uint32_t GetStartPage(VkDeviceSize offset) 
const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } - uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } - - uint32_t OffsetToPageIndex(VkDeviceSize offset) const; - void AllocPage(RegionInfo& page, uint8_t allocType); -}; - -#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS -VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) - : m_BufferImageGranularity(bufferImageGranularity), - m_RegionCount(0), - m_RegionInfo(VMA_NULL) {} - -VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() -{ - VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); -} - -void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) -{ - if (IsEnabled()) - { - m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); - m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); - memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); - } -} - -void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) -{ - if (m_RegionInfo) - { - vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); - m_RegionInfo = VMA_NULL; - } -} - -void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const -{ - if (m_BufferImageGranularity > 1 && - m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) - { - if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) - { - inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); - inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); - } - } -} - -bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(inOutAllocOffset); - if (m_RegionInfo[startPage].allocCount > 0 && - VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) - { - inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); - if (blockSize < allocSize + inOutAllocOffset - blockOffset) - return true; - ++startPage; - } - uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); - if (endPage != startPage && - m_RegionInfo[endPage].allocCount > 0 && - VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) - { - return true; - } - } - return false; -} - -void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(offset); - AllocPage(m_RegionInfo[startPage], allocType); - - uint32_t endPage = GetEndPage(offset, size); - if (startPage != endPage) - AllocPage(m_RegionInfo[endPage], allocType); - } -} - -void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(offset); - --m_RegionInfo[startPage].allocCount; - if (m_RegionInfo[startPage].allocCount == 0) - m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; 
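-        // The allocation may end on a different granularity page; that page's
-        // counter is decremented below.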
- uint32_t endPage = GetEndPage(offset, size); - if (startPage != endPage) - { - --m_RegionInfo[endPage].allocCount; - if (m_RegionInfo[endPage].allocCount == 0) - m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; - } - } -} - -void VmaBlockBufferImageGranularity::Clear() -{ - if (m_RegionInfo) - memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); -} - -VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( - const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const -{ - ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; - if (!isVirutal && IsEnabled()) - { - ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); - memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); - } - return ctx; -} - -bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, - VkDeviceSize offset, VkDeviceSize size) const -{ - if (IsEnabled()) - { - uint32_t start = GetStartPage(offset); - ++ctx.pageAllocs[start]; - VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); - - uint32_t end = GetEndPage(offset, size); - if (start != end) - { - ++ctx.pageAllocs[end]; - VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); - } - } - return true; -} - -bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const -{ - // Check proper page structure - if (IsEnabled()) - { - VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); - - for (uint32_t page = 0; page < m_RegionCount; ++page) - { - VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); - } - vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); - ctx.pageAllocs = VMA_NULL; - } - return true; -} - -uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const -{ - return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); -} - -void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) -{ - // When current alloc type is free then it can be overriden by new type - if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) - page.allocType = allocType; - - ++page.allocCount; -} -#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS -#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY - -#if 0 -#ifndef _VMA_BLOCK_METADATA_GENERIC -class VmaBlockMetadata_Generic : public VmaBlockMetadata -{ - friend class VmaDefragmentationAlgorithm_Generic; - friend class VmaDefragmentationAlgorithm_Fast; - VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) -public: - VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata_Generic() = default; - - size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; } - VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } - bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } - void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; - - void Init(VkDeviceSize size) override; - bool Validate() const override; - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; - -#if VMA_STATS_STRING_ENABLED - void 
PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; -#endif - - bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; - - VkResult CheckCorruption(const void* pBlockData) override; - - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) override; - - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; - VmaAllocHandle GetAllocationListBegin() const override; - VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; - void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; - void DebugLogAllAllocations() const override; - -private: - uint32_t m_FreeCount; - VkDeviceSize m_SumFreeSize; - VmaSuballocationList m_Suballocations; - // Suballocations that are free. Sorted by size, ascending. - VmaVector> m_FreeSuballocationsBySize; - - VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); } - - VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const; - bool ValidateFreeSuballocationList() const; - - // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. - // If yes, fills pOffset and returns true. If no, returns false. - bool CheckAllocation( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - VmaAllocHandle* pAllocHandle) const; - - // Given free suballocation, it merges it with following one, which must also be free. - void MergeFreeWithNext(VmaSuballocationList::iterator item); - // Releases given suballocation, making it free. - // Merges it with adjacent free suballocations if applicable. - // Returns iterator to new free suballocation at this place. - VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); - // Given free suballocation, it inserts it into sorted list of - // m_FreeSuballocationsBySize if it is suitable. - void RegisterFreeSuballocation(VmaSuballocationList::iterator item); - // Given free suballocation, it removes it from sorted list of - // m_FreeSuballocationsBySize if it is suitable. 
- void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); -}; - -#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS -VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_FreeCount(0), - m_SumFreeSize(0), - m_Suballocations(VmaStlAllocator(pAllocationCallbacks)), - m_FreeSuballocationsBySize(VmaStlAllocator(pAllocationCallbacks)) {} - -void VmaBlockMetadata_Generic::Init(VkDeviceSize size) -{ - VmaBlockMetadata::Init(size); - - m_FreeCount = 1; - m_SumFreeSize = size; - - VmaSuballocation suballoc = {}; - suballoc.offset = 0; - suballoc.size = size; - suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - - m_Suballocations.push_back(suballoc); - m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); -} - -bool VmaBlockMetadata_Generic::Validate() const -{ - VMA_VALIDATE(!m_Suballocations.empty()); - - // Expected offset of new suballocation as calculated from previous ones. - VkDeviceSize calculatedOffset = 0; - // Expected number of free suballocations as calculated from traversing their list. - uint32_t calculatedFreeCount = 0; - // Expected sum size of free suballocations as calculated from traversing their list. - VkDeviceSize calculatedSumFreeSize = 0; - // Expected number of free suballocations that should be registered in - // m_FreeSuballocationsBySize calculated from traversing their list. - size_t freeSuballocationsToRegister = 0; - // True if previous visited suballocation was free. - bool prevFree = false; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - for (const auto& subAlloc : m_Suballocations) - { - // Actual offset of this suballocation doesn't match expected one. - VMA_VALIDATE(subAlloc.offset == calculatedOffset); - - const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE); - // Two adjacent free suballocations are invalid. They should be merged. - VMA_VALIDATE(!prevFree || !currFree); - - VmaAllocation alloc = (VmaAllocation)subAlloc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - - if (currFree) - { - calculatedSumFreeSize += subAlloc.size; - ++calculatedFreeCount; - ++freeSuballocationsToRegister; - - // Margin required between allocations - every free space must be at least that large. - VMA_VALIDATE(subAlloc.size >= debugMargin); - } - else - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == subAlloc.size); - } - - // Margin required between allocations - previous allocation must be free. - VMA_VALIDATE(debugMargin == 0 || prevFree); - } - - calculatedOffset += subAlloc.size; - prevFree = currFree; - } - - // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't - // match expected one. - VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); - - VkDeviceSize lastSize = 0; - for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) - { - VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; - - // Only free suballocations can be registered in m_FreeSuballocationsBySize. - VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); - // They must be sorted by size ascending. - VMA_VALIDATE(suballocItem->size >= lastSize); - - lastSize = suballocItem->size; - } - - // Check if totals match calculated values. 
- VMA_VALIDATE(ValidateFreeSuballocationList()); - VMA_VALIDATE(calculatedOffset == GetSize()); - VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); - VMA_VALIDATE(calculatedFreeCount == m_FreeCount); - - return true; -} - -void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += GetSize(); - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - else - VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size); - } -} - -void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const -{ - inoutStats.blockCount++; - inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount; - inoutStats.blockBytes += GetSize(); - inoutStats.allocationBytes += GetSize() - m_SumFreeSize; -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const -{ - PrintDetailedMap_Begin(json, - m_SumFreeSize, // unusedBytes - m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount - m_FreeCount, // unusedRangeCount - mapRefCount); - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) - { - PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size); - } - else - { - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - } - } - - PrintDetailedMap_End(json); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaBlockMetadata_Generic::CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(!upperAddress); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(pAllocationRequest != VMA_NULL); - VMA_HEAVY_ASSERT(Validate()); - - allocSize = AlignAllocationSize(allocSize); - - pAllocationRequest->type = VmaAllocationRequestType::Normal; - pAllocationRequest->size = allocSize; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - // There is not enough total free space in this block to fulfill the request: Early return. - if (m_SumFreeSize < allocSize + debugMargin) - { - return false; - } - - // New algorithm, efficiently searching freeSuballocationsBySize. - const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); - if (freeSuballocCount > 0) - { - if (strategy == 0 || - strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) - { - // Find first free suballocation with size not less than allocSize + debugMargin. 
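-            // m_FreeSuballocationsBySize is kept sorted by size ascending, so a binary
-            // search yields the best-fit candidate in O(log n); the loop below walks
-            // forward past candidates that fail alignment or granularity checks.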
- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( - m_FreeSuballocationsBySize.data(), - m_FreeSuballocationsBySize.data() + freeSuballocCount, - allocSize + debugMargin, - VmaSuballocationItemSizeLess()); - size_t index = it - m_FreeSuballocationsBySize.data(); - for (; index < freeSuballocCount; ++index) - { - if (CheckAllocation( - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = m_FreeSuballocationsBySize[index]; - return true; - } - } - } - else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) - { - for (VmaSuballocationList::iterator it = m_Suballocations.begin(); - it != m_Suballocations.end(); - ++it) - { - if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( - allocSize, - allocAlignment, - allocType, - it, - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = it; - return true; - } - } - } - else - { - VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )); - // Search staring from biggest suballocations. - for (size_t index = freeSuballocCount; index--; ) - { - if (CheckAllocation( - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = m_FreeSuballocationsBySize[index]; - return true; - } - } - } - } - - return false; -} - -VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) -{ - for (auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - return VK_SUCCESS; -} - -void VmaBlockMetadata_Generic::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); - VMA_ASSERT(request.item != m_Suballocations.end()); - VmaSuballocation& suballoc = *request.item; - // Given suballocation is a free block. - VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - // Given offset is inside this suballocation. - VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset); - const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1; - VMA_ASSERT(suballoc.size >= paddingBegin + request.size); - const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size; - - // Unregister this free suballocation from m_FreeSuballocationsBySize and update - // it to become used. - UnregisterFreeSuballocation(request.item); - - suballoc.offset = (VkDeviceSize)request.allocHandle - 1; - suballoc.size = request.size; - suballoc.type = type; - suballoc.userData = userData; - - // If there are any free bytes remaining at the end, insert new free suballocation after current one. 
- if (paddingEnd) - { - VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = suballoc.offset + suballoc.size; - paddingSuballoc.size = paddingEnd; - paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - VmaSuballocationList::iterator next = request.item; - ++next; - const VmaSuballocationList::iterator paddingEndItem = - m_Suballocations.insert(next, paddingSuballoc); - RegisterFreeSuballocation(paddingEndItem); - } - - // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. - if (paddingBegin) - { - VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = suballoc.offset - paddingBegin; - paddingSuballoc.size = paddingBegin; - paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - const VmaSuballocationList::iterator paddingBeginItem = - m_Suballocations.insert(request.item, paddingSuballoc); - RegisterFreeSuballocation(paddingBeginItem); - } - - // Update totals. - m_FreeCount = m_FreeCount - 1; - if (paddingBegin > 0) - { - ++m_FreeCount; - } - if (paddingEnd > 0) - { - ++m_FreeCount; - } - m_SumFreeSize -= request.size; -} - -void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - outInfo.offset = (VkDeviceSize)allocHandle - 1; - const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset); - outInfo.size = suballoc.size; - outInfo.pUserData = suballoc.userData; -} - -void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData; -} - -VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const -{ - if (IsEmpty()) - return VK_NULL_HANDLE; - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - return (VmaAllocHandle)(suballoc.offset + 1); - } - VMA_ASSERT(false && "Should contain at least 1 allocation!"); - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1); - - for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it) - { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - return (VmaAllocHandle)(it->offset + 1); - } - return VK_NULL_HANDLE; -} - -void VmaBlockMetadata_Generic::Clear() -{ - const VkDeviceSize size = GetSize(); - - VMA_ASSERT(IsVirtual()); - m_FreeCount = 1; - m_SumFreeSize = size; - m_Suballocations.clear(); - m_FreeSuballocationsBySize.clear(); - - VmaSuballocation suballoc = {}; - suballoc.offset = 0; - suballoc.size = size; - suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - m_Suballocations.push_back(suballoc); - - m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); -} - -void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1); - suballoc.userData = userData; -} - -void VmaBlockMetadata_Generic::DebugLogAllAllocations() const -{ - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData); - } -} - -VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const -{ - VMA_HEAVY_ASSERT(!m_Suballocations.empty()); - const VkDeviceSize last = m_Suballocations.rbegin()->offset; - if (last == offset) - return 
m_Suballocations.rbegin().drop_const(); - const VkDeviceSize first = m_Suballocations.begin()->offset; - if (first == offset) - return m_Suballocations.begin().drop_const(); - - const size_t suballocCount = m_Suballocations.size(); - const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount; - auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator - { - for (auto suballocItem = begin; - suballocItem != end; - ++suballocItem) - { - if (suballocItem->offset == offset) - return suballocItem.drop_const(); - } - VMA_ASSERT(false && "Not found!"); - return m_Suballocations.end().drop_const(); - }; - // If requested offset is closer to the end of range, search from the end - if (offset - first > suballocCount * step / 2) - { - return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend()); - } - return findSuballocation(m_Suballocations.begin(), m_Suballocations.end()); -} - -bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const -{ - VkDeviceSize lastSize = 0; - for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) - { - const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; - - VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); - VMA_VALIDATE(it->size >= lastSize); - lastSize = it->size; - } - return true; -} - -bool VmaBlockMetadata_Generic::CheckAllocation( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - VmaAllocHandle* pAllocHandle) const -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(suballocItem != m_Suballocations.cend()); - VMA_ASSERT(pAllocHandle != VMA_NULL); - - const VkDeviceSize debugMargin = GetDebugMargin(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - - const VmaSuballocation& suballoc = *suballocItem; - VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - // Size of this suballocation is too small for this request: Early return. - if (suballoc.size < allocSize) - { - return false; - } - - // Start from offset equal to beginning of this suballocation. - VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin()); - - // Apply debugMargin from the end of previous alloc. - if (debugMargin > 0) - { - offset += debugMargin; - } - - // Apply alignment. - offset = VmaAlignUp(offset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment) - { - bool bufferImageGranularityConflict = false; - VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; - while (prevSuballocItem != m_Suballocations.cbegin()) - { - --prevSuballocItem; - const VmaSuballocation& prevSuballoc = *prevSuballocItem; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - offset = VmaAlignUp(offset, bufferImageGranularity); - } - } - - // Calculate padding at the beginning based on current offset. 
-    const VkDeviceSize paddingBegin = offset - suballoc.offset;
-
-    // Fail if requested size plus margin after is bigger than size of this suballocation.
-    if (paddingBegin + allocSize + debugMargin > suballoc.size)
-    {
-        return false;
-    }
-
-    // Check next suballocations for BufferImageGranularity conflicts.
-    // If conflict exists, allocation cannot be made here.
-    if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
-    {
-        VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
-        ++nextSuballocItem;
-        while (nextSuballocItem != m_Suballocations.cend())
-        {
-            const VmaSuballocation& nextSuballoc = *nextSuballocItem;
-            if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-            {
-                if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-                {
-                    return false;
-                }
-            }
-            else
-            {
-                // Already on next page.
-                break;
-            }
-            ++nextSuballocItem;
-        }
-    }
-
-    *pAllocHandle = (VmaAllocHandle)(offset + 1);
-    // All tests passed: Success. pAllocHandle is already filled.
-    return true;
-}
-
-void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
-{
-    VMA_ASSERT(item != m_Suballocations.end());
-    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
-    VmaSuballocationList::iterator nextItem = item;
-    ++nextItem;
-    VMA_ASSERT(nextItem != m_Suballocations.end());
-    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-
-    item->size += nextItem->size;
-    --m_FreeCount;
-    m_Suballocations.erase(nextItem);
-}
-
-VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
-{
-    // Change this suballocation to be marked as free.
-    VmaSuballocation& suballoc = *suballocItem;
-    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-    suballoc.userData = VMA_NULL;
-
-    // Update totals.
-    ++m_FreeCount;
-    m_SumFreeSize += suballoc.size;
-
-    // Merge with previous and/or next suballocation if it's also free.
-    bool mergeWithNext = false;
-    bool mergeWithPrev = false;
-
-    VmaSuballocationList::iterator nextItem = suballocItem;
-    ++nextItem;
-    if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
-    {
-        mergeWithNext = true;
-    }
-
-    VmaSuballocationList::iterator prevItem = suballocItem;
-    if (suballocItem != m_Suballocations.begin())
-    {
-        --prevItem;
-        if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            mergeWithPrev = true;
-        }
-    }
-
-    if (mergeWithNext)
-    {
-        UnregisterFreeSuballocation(nextItem);
-        MergeFreeWithNext(suballocItem);
-    }
-
-    if (mergeWithPrev)
-    {
-        UnregisterFreeSuballocation(prevItem);
-        MergeFreeWithNext(prevItem);
-        RegisterFreeSuballocation(prevItem);
-        return prevItem;
-    }
-    else
-    {
-        RegisterFreeSuballocation(suballocItem);
-        return suballocItem;
-    }
-}
-
-void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
-{
-    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-    VMA_ASSERT(item->size > 0);
-
-    // You may want to enable this validation at the beginning or at the end of
-    // this function, depending on what you want to check.
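-    // Invariant, with hypothetical sizes for illustration: m_FreeSuballocationsBySize
-    // stays sorted ascending by size, e.g. [64, 128, 256, 1024], so the
-    // VmaBinaryFindFirstNotLess() call in UnregisterFreeSuballocation() below can
-    // binary-search it, and a best-fit lookup for 200 bytes lands on the 256 entry.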
-    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-
-    if (m_FreeSuballocationsBySize.empty())
-    {
-        m_FreeSuballocationsBySize.push_back(item);
-    }
-    else
-    {
-        VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
-    }
-
-    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-}
-
-void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
-{
-    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-    VMA_ASSERT(item->size > 0);
-
-    // You may want to enable this validation at the beginning or at the end of
-    // this function, depending on what you want to check.
-    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-
-    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-        m_FreeSuballocationsBySize.data(),
-        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
-        item,
-        VmaSuballocationItemSizeLess());
-    for (size_t index = it - m_FreeSuballocationsBySize.data();
-        index < m_FreeSuballocationsBySize.size();
-        ++index)
-    {
-        if (m_FreeSuballocationsBySize[index] == item)
-        {
-            VmaVectorRemove(m_FreeSuballocationsBySize, index);
-            return;
-        }
-        VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
-    }
-    VMA_ASSERT(0 && "Not found.");
-
-    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-}
-#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
-#endif // _VMA_BLOCK_METADATA_GENERIC
-#endif // #if 0
-
-#ifndef _VMA_BLOCK_METADATA_LINEAR
-/*
-Allocations and their references in internal data structure look like this:
-
-if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
-
-        0 +-------+
-          |       |
-          |       |
-          |       |
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount]
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
-          +-------+
-          |  ...  |
-          +-------+
-          | Alloc |  1st[1st.size() - 1]
-          +-------+
-          |       |
-          |       |
-          |       |
-GetSize() +-------+
-
-if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
-
-        0 +-------+
-          | Alloc |  2nd[0]
-          +-------+
-          | Alloc |  2nd[1]
-          +-------+
-          |  ...  |
-          +-------+
-          | Alloc |  2nd[2nd.size() - 1]
-          +-------+
-          |       |
-          |       |
-          |       |
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount]
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
-          +-------+
-          |  ...  |
-          +-------+
-          | Alloc |  1st[1st.size() - 1]
-          +-------+
-          |       |
-GetSize() +-------+
-
-if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
-
-        0 +-------+
-          |       |
-          |       |
-          |       |
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount]
-          +-------+
-          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
-          +-------+
-          |  ...  |
-          +-------+
-          | Alloc |  1st[1st.size() - 1]
-          +-------+
-          |       |
-          |       |
-          |       |
-          +-------+
-          | Alloc |  2nd[2nd.size() - 1]
-          +-------+
-          |  ...  |
-          +-------+
-          | Alloc |  2nd[1]
-          +-------+
-          | Alloc |  2nd[0]
-GetSize() +-------+
-
-*/
-class VmaBlockMetadata_Linear : public VmaBlockMetadata
-{
-    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
-public:
-    VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-        VkDeviceSize bufferImageGranularity, bool isVirtual);
-    virtual ~VmaBlockMetadata_Linear() = default;
-
-    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
-    bool IsEmpty() const override { return GetAllocationCount() == 0; }
-    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };
-
-    void Init(VkDeviceSize size) override;
-    bool Validate() const override;
-    size_t GetAllocationCount() const override;
-    size_t GetFreeRegionsCount() const override;
-
-    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-    void AddStatistics(VmaStatistics& inoutStats) const override;
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMap(class VmaJsonWriter& json) const override;
-#endif
-
-    bool CreateAllocationRequest(
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        bool upperAddress,
-        VmaSuballocationType allocType,
-        uint32_t strategy,
-        VmaAllocationRequest* pAllocationRequest) override;
-
-    VkResult CheckCorruption(const void* pBlockData) override;
-
-    void Alloc(
-        const VmaAllocationRequest& request,
-        VmaSuballocationType type,
-        void* userData) override;
-
-    void Free(VmaAllocHandle allocHandle) override;
-    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-    VmaAllocHandle GetAllocationListBegin() const override;
-    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-    void Clear() override;
-    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-    void DebugLogAllAllocations() const override;
-
-private:
-    /*
-    There are two suballocation vectors, used in a ping-pong way.
-    The one with index m_1stVectorIndex is called 1st.
-    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
-    2nd can be non-empty only when 1st is not empty.
-    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
-    */
-    typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
-
-    enum SECOND_VECTOR_MODE
-    {
-        SECOND_VECTOR_EMPTY,
-        /*
-        Suballocations in 2nd vector are created later than the ones in 1st, but they
-        all have smaller offset.
-        */
-        SECOND_VECTOR_RING_BUFFER,
-        /*
-        Suballocations in 2nd vector are upper side of double stack.
-        They all have offsets higher than those in 1st vector.
-        Top of this stack means smaller offsets, but higher indices in this vector.
-        */
-        SECOND_VECTOR_DOUBLE_STACK,
-    };
-
-    VkDeviceSize m_SumFreeSize;
-    SuballocationVectorType m_Suballocations0, m_Suballocations1;
-    uint32_t m_1stVectorIndex;
-    SECOND_VECTOR_MODE m_2ndVectorMode;
-    // Number of items in 1st vector with hAllocation = null at the beginning.
-    size_t m_1stNullItemsBeginCount;
-    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
-    size_t m_1stNullItemsMiddleCount;
-    // Number of items in 2nd vector with hAllocation = null.
-    size_t m_2ndNullItemsCount;
-
-    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-
-    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
-    bool ShouldCompact1st() const;
-    void CleanupAfterFree();
-
-    bool CreateAllocationRequest_LowerAddress(
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        VmaSuballocationType allocType,
-        uint32_t strategy,
-        VmaAllocationRequest* pAllocationRequest);
-    bool CreateAllocationRequest_UpperAddress(
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        VmaSuballocationType allocType,
-        uint32_t strategy,
-        VmaAllocationRequest* pAllocationRequest);
-};
-
-#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
-VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-    VkDeviceSize bufferImageGranularity, bool isVirtual)
-    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-    m_SumFreeSize(0),
-    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-    m_1stVectorIndex(0),
-    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
-    m_1stNullItemsBeginCount(0),
-    m_1stNullItemsMiddleCount(0),
-    m_2ndNullItemsCount(0) {}
-
-void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
-{
-    VmaBlockMetadata::Init(size);
-    m_SumFreeSize = size;
-}
-
-bool VmaBlockMetadata_Linear::Validate() const
-{
-    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
-    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
-    VMA_VALIDATE(!suballocations1st.empty() ||
-        suballocations2nd.empty() ||
-        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
-
-    if (!suballocations1st.empty())
-    {
-        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
-        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
-        // Null item at the end should be just pop_back().
-        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
-    }
-    if (!suballocations2nd.empty())
-    {
-        // Null item at the end should be just pop_back().
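-        // (A trailing free item here would mean CleanupAfterFree() below missed it.)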
- VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); - } - - VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); - VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); - - VkDeviceSize sumUsedSize = 0; - const size_t suballoc1stCount = suballocations1st.size(); - const VkDeviceSize debugMargin = GetDebugMargin(); - VkDeviceSize offset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const size_t suballoc2ndCount = suballocations2nd.size(); - size_t nullItem2ndCount = 0; - for (size_t i = 0; i < suballoc2ndCount; ++i) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem2ndCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - - VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); - } - - for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && - suballoc.userData == VMA_NULL); - } - - size_t nullItem1stCount = m_1stNullItemsBeginCount; - - for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem1stCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - const size_t suballoc2ndCount = suballocations2nd.size(); - size_t nullItem2ndCount = 0; - for (size_t i = suballoc2ndCount; i--; ) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem2ndCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - - VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); - } - - VMA_VALIDATE(offset <= GetSize()); - VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); - - return true; -} - -size_t VmaBlockMetadata_Linear::GetAllocationCount() const -{ - 
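-    // Live allocations = 1st vector entries minus its null items (leading + middle),
-    // plus 2nd vector entries minus its null items, as computed below.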
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + - AccessSuballocations2nd().size() - m_2ndNullItemsCount; -} - -size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return SIZE_MAX; -} - -void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += size; - - VkDeviceSize lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - if (lastOffset < freeSpace2ndTo1stEnd) - { - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. 
- lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - if (lastOffset < freeSpace1stTo2ndEnd) - { - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to size. - if (lastOffset < size) - { - const VkDeviceSize unusedRangeSize = size - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = size; - } - } - } -} - -void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const VkDeviceSize size = GetSize(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - inoutStats.blockCount++; - inoutStats.blockBytes += size; - inoutStats.allocationBytes += size - m_SumFreeSize; - - VkDeviceSize lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. 
- else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace1stTo2ndEnd) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - const VkDeviceSize unusedRangeSize = size - lastOffset; - } - - // End of loop. 
- lastOffset = size; - } - } - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const -{ - const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - // FIRST PASS - - size_t unusedRangeCount = 0; - VkDeviceSize usedBytes = 0; - - VkDeviceSize lastOffset = 0; - - size_t alloc2ndCount = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc2ndCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - size_t alloc1stCount = 0; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc1stCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. 
- while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc2ndCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = size; - } - } - } - - const VkDeviceSize unusedBytes = size - usedBytes; - PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); - - // SECOND PASS - lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - nextAlloc1stIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. 
- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace1stTo2ndEnd) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - const VkDeviceSize unusedRangeSize = size - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = size; - } - } - } - - PrintDetailedMap_End(json); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaBlockMetadata_Linear::CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(pAllocationRequest != VMA_NULL); - VMA_HEAVY_ASSERT(Validate()); - pAllocationRequest->size = allocSize; - return upperAddress ? 
- CreateAllocationRequest_UpperAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : - CreateAllocationRequest_LowerAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest); -} - -VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) -{ - VMA_ASSERT(!IsVirtual()); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - return VK_SUCCESS; -} - -void VmaBlockMetadata_Linear::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; - const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; - - switch (request.type) - { - case VmaAllocationRequestType::UpperAddress: - { - VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && - "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - suballocations2nd.push_back(newSuballoc); - m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; - } - break; - case VmaAllocationRequestType::EndOf1st: - { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - - VMA_ASSERT(suballocations1st.empty() || - offset >= suballocations1st.back().offset + suballocations1st.back().size); - // Check if it fits before the end of the block. - VMA_ASSERT(offset + request.size <= GetSize()); - - suballocations1st.push_back(newSuballoc); - } - break; - case VmaAllocationRequestType::EndOf2nd: - { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. - VMA_ASSERT(!suballocations1st.empty() && - offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - switch (m_2ndVectorMode) - { - case SECOND_VECTOR_EMPTY: - // First allocation from second part ring buffer. - VMA_ASSERT(suballocations2nd.empty()); - m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; - break; - case SECOND_VECTOR_RING_BUFFER: - // 2-part ring buffer is already started. 
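-            // (i.e. at least one allocation has already wrapped around into 2nd.)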
- VMA_ASSERT(!suballocations2nd.empty()); - break; - case SECOND_VECTOR_DOUBLE_STACK: - VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); - break; - default: - VMA_ASSERT(0); - } - - suballocations2nd.push_back(newSuballoc); - } - break; - default: - VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); - } - - m_SumFreeSize -= newSuballoc.size; -} - -void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) -{ - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; - - if (!suballocations1st.empty()) - { - // First allocation: Mark it as next empty at the beginning. - VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; - if (firstSuballoc.offset == offset) - { - firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - firstSuballoc.userData = VMA_NULL; - m_SumFreeSize += firstSuballoc.size; - ++m_1stNullItemsBeginCount; - CleanupAfterFree(); - return; - } - } - - // Last allocation in 2-part ring buffer or top of upper stack (same logic). - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - VmaSuballocation& lastSuballoc = suballocations2nd.back(); - if (lastSuballoc.offset == offset) - { - m_SumFreeSize += lastSuballoc.size; - suballocations2nd.pop_back(); - CleanupAfterFree(); - return; - } - } - // Last allocation in 1st vector. - else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) - { - VmaSuballocation& lastSuballoc = suballocations1st.back(); - if (lastSuballoc.offset == offset) - { - m_SumFreeSize += lastSuballoc.size; - suballocations1st.pop_back(); - CleanupAfterFree(); - return; - } - } - - VmaSuballocation refSuballoc; - refSuballoc.offset = offset; - // Rest of members stays uninitialized intentionally for better performance. - - // Item from the middle of 1st vector. - { - const SuballocationVectorType::iterator it = VmaBinaryFindSorted( - suballocations1st.begin() + m_1stNullItemsBeginCount, - suballocations1st.end(), - refSuballoc, - VmaSuballocationOffsetLess()); - if (it != suballocations1st.end()) - { - it->type = VMA_SUBALLOCATION_TYPE_FREE; - it->userData = VMA_NULL; - ++m_1stNullItemsMiddleCount; - m_SumFreeSize += it->size; - CleanupAfterFree(); - return; - } - } - - if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) - { - // Item from the middle of 2nd vector. - const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : - VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); - if (it != suballocations2nd.end()) - { - it->type = VMA_SUBALLOCATION_TYPE_FREE; - it->userData = VMA_NULL; - ++m_2ndNullItemsCount; - m_SumFreeSize += it->size; - CleanupAfterFree(); - return; - } - } - - VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); -} - -void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - outInfo.offset = (VkDeviceSize)allocHandle - 1; - VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); - outInfo.size = suballoc.size; - outInfo.pUserData = suballoc.userData; -} - -void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; -} - -VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return VK_NULL_HANDLE; -} - -VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return 0; -} - -void VmaBlockMetadata_Linear::Clear() -{ - m_SumFreeSize = GetSize(); - m_Suballocations0.clear(); - m_Suballocations1.clear(); - // Leaving m_1stVectorIndex unchanged - it doesn't matter. - m_2ndVectorMode = SECOND_VECTOR_EMPTY; - m_1stNullItemsBeginCount = 0; - m_1stNullItemsMiddleCount = 0; - m_2ndNullItemsCount = 0; -} - -void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); - suballoc.userData = userData; -} - -void VmaBlockMetadata_Linear::DebugLogAllAllocations() const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(it->offset, it->size, it->userData); - - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(it->offset, it->size, it->userData); -} - -VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - VmaSuballocation refSuballoc; - refSuballoc.offset = offset; - // Rest of members stays uninitialized intentionally for better performance. - - // Item from the 1st vector. 
-    {
-        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
-            suballocations1st.begin() + m_1stNullItemsBeginCount,
-            suballocations1st.end(),
-            refSuballoc,
-            VmaSuballocationOffsetLess());
-        if (it != suballocations1st.end())
-        {
-            return const_cast<VmaSuballocation&>(*it);
-        }
-    }
-
-    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-    {
-        // Rest of members stays uninitialized intentionally for better performance.
-        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-        if (it != suballocations2nd.end())
-        {
-            return const_cast<VmaSuballocation&>(*it);
-        }
-    }
-
-    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
-    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
-}
-
-bool VmaBlockMetadata_Linear::ShouldCompact1st() const
-{
-    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-    const size_t suballocCount = AccessSuballocations1st().size();
-    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
-}
-
-void VmaBlockMetadata_Linear::CleanupAfterFree()
-{
-    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
-    if (IsEmpty())
-    {
-        suballocations1st.clear();
-        suballocations2nd.clear();
-        m_1stNullItemsBeginCount = 0;
-        m_1stNullItemsMiddleCount = 0;
-        m_2ndNullItemsCount = 0;
-        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-    }
-    else
-    {
-        const size_t suballoc1stCount = suballocations1st.size();
-        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
-
-        // Find more null items at the beginning of 1st vector.
-        while (m_1stNullItemsBeginCount < suballoc1stCount &&
-            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            ++m_1stNullItemsBeginCount;
-            --m_1stNullItemsMiddleCount;
-        }
-
-        // Find more null items at the end of 1st vector.
-        while (m_1stNullItemsMiddleCount > 0 &&
-            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_1stNullItemsMiddleCount;
-            suballocations1st.pop_back();
-        }
-
-        // Find more null items at the end of 2nd vector.
-        while (m_2ndNullItemsCount > 0 &&
-            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_2ndNullItemsCount;
-            suballocations2nd.pop_back();
-        }
-
-        // Find more null items at the beginning of 2nd vector.
-        while (m_2ndNullItemsCount > 0 &&
-            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_2ndNullItemsCount;
-            VmaVectorRemove(suballocations2nd, 0);
-        }
-
-        if (ShouldCompact1st())
-        {
-            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
-            size_t srcIndex = m_1stNullItemsBeginCount;
-            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
-            {
-                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
-                {
-                    ++srcIndex;
-                }
-                if (dstIndex != srcIndex)
-                {
-                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
-                }
-                ++srcIndex;
-            }
-            suballocations1st.resize(nonNullItemCount);
-            m_1stNullItemsBeginCount = 0;
-            m_1stNullItemsMiddleCount = 0;
-        }
-
-        // 2nd vector became empty.
-        if (suballocations2nd.empty())
-        {
-            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-        }
-
-        // 1st vector became empty.
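-        // When the ring buffer has wrapped, 2nd takes over as the new 1st via the
-        // m_1stVectorIndex ^= 1 swap below. Illustrative state change:
-        //   before: 1st = [],     2nd = [A, B]  (SECOND_VECTOR_RING_BUFFER)
-        //   after:  1st = [A, B], 2nd = []      (SECOND_VECTOR_EMPTY)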
- if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) - { - suballocations1st.clear(); - m_1stNullItemsBeginCount = 0; - - if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - // Swap 1st with 2nd. Now 2nd is empty. - m_2ndVectorMode = SECOND_VECTOR_EMPTY; - m_1stNullItemsMiddleCount = m_2ndNullItemsCount; - while (m_1stNullItemsBeginCount < suballocations2nd.size() && - suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) - { - ++m_1stNullItemsBeginCount; - --m_1stNullItemsMiddleCount; - } - m_2ndNullItemsCount = 0; - m_1stVectorIndex ^= 1; - } - } - } - - VMA_HEAVY_ASSERT(Validate()); -} - -bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - const VkDeviceSize blockSize = GetSize(); - const VkDeviceSize debugMargin = GetDebugMargin(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - // Try to allocate at the end of 1st vector. - - VkDeviceSize resultBaseOffset = 0; - if (!suballocations1st.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations1st.back(); - resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; - } - - // Start from offset equal to beginning of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - // Apply alignment. - resultOffset = VmaAlignUp(resultOffset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); - } - } - - const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? - suballocations2nd.back().offset : blockSize; - - // There is enough free space at the end after alignment. - if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) - { - // Check next suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. 
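-            // (The check is skipped when both resultOffset and allocSize are multiples
-            // of bufferImageGranularity: the allocation then starts and ends on a
-            // granularity page boundary and cannot share a page with 2nd-vector neighbors.)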
- if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) - { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) - { - return false; - } - } - else - { - // Already on previous page. - break; - } - } - } - - // All tests passed: Success. - pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); - // pAllocationRequest->item, customData unused. - pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; - return true; - } - } - - // Wrap-around to end of 2nd vector. Try to allocate there, watching for the - // beginning of 1st vector as the end of free space. - if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - VMA_ASSERT(!suballocations1st.empty()); - - VkDeviceSize resultBaseOffset = 0; - if (!suballocations2nd.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); - resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; - } - - // Start from offset equal to beginning of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - // Apply alignment. - resultOffset = VmaAlignUp(resultOffset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); - } - } - - size_t index1st = m_1stNullItemsBeginCount; - - // There is enough free space at the end after alignment. - if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || - (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) - { - // Check next suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. - if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) - { - for (size_t nextSuballocIndex = index1st; - nextSuballocIndex < suballocations1st.size(); - nextSuballocIndex++) - { - const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) - { - return false; - } - } - else - { - // Already on next page. - break; - } - } - } - - // All tests passed: Success. 
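-            // (allocHandle encodes offset + 1 so offset 0 still maps to a non-null
-            // handle; GetAllocationOffset() reverses this with "- 1".)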
- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); - pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; - // pAllocationRequest->item, customData unused. - return true; - } - } - - return false; -} - -bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - const VkDeviceSize blockSize = GetSize(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); - return false; - } - - // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). - if (allocSize > blockSize) - { - return false; - } - VkDeviceSize resultBaseOffset = blockSize - allocSize; - if (!suballocations2nd.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); - resultBaseOffset = lastSuballoc.offset - allocSize; - if (allocSize > lastSuballoc.offset) - { - return false; - } - } - - // Start from offset equal to end of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - // Apply debugMargin at the end. - if (debugMargin > 0) - { - if (resultOffset < debugMargin) - { - return false; - } - resultOffset -= debugMargin; - } - - // Apply alignment. - resultOffset = VmaAlignDown(resultOffset, allocAlignment); - - // Check next suballocations from 2nd for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) - { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); - } - } - - // There is enough free space. - const VkDeviceSize endOf1st = !suballocations1st.empty() ? - suballocations1st.back().offset + suballocations1st.back().size : - 0; - if (endOf1st + debugMargin <= resultOffset) - { - // Check previous suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. - if (bufferImageGranularity > 1) - { - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) - { - return false; - } - } - else - { - // Already on next page. - break; - } - } - } - - // All tests passed: Success. 
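-        // (Illustrative numbers: in a 1024-byte block with no prior upper allocations,
-        // a 128-byte upper-address request gives resultOffset = 1024 - 128 = 896,
-        // assuming no debug margin and an alignment that 896 already satisfies.)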
-        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-        // pAllocationRequest->item unused.
-        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
-        return true;
-    }
-
-    return false;
-}
-#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
-#endif // _VMA_BLOCK_METADATA_LINEAR
-
-#if 0
-#ifndef _VMA_BLOCK_METADATA_BUDDY
-/*
-- GetSize() is the original size of allocated memory block.
-- m_UsableSize is this size aligned down to a power of two.
-  All allocations and calculations happen relative to m_UsableSize.
-- GetUnusableSize() is the difference between them.
-  It is reported as separate, unused range, not available for allocations.
-
-Node at level 0 has size = m_UsableSize.
-Each next level contains nodes with size 2 times smaller than current level.
-m_LevelCount is the maximum number of levels to use in the current object.
-*/
-class VmaBlockMetadata_Buddy : public VmaBlockMetadata
-{
-    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
-public:
-    VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-        VkDeviceSize bufferImageGranularity, bool isVirtual);
-    virtual ~VmaBlockMetadata_Buddy();
-
-    size_t GetAllocationCount() const override { return m_AllocationCount; }
-    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
-    bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
-    VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
-    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };
-    void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
-
-    void Init(VkDeviceSize size) override;
-    bool Validate() const override;
-
-    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-    void AddStatistics(VmaStatistics& inoutStats) const override;
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
-#endif
-
-    bool CreateAllocationRequest(
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        bool upperAddress,
-        VmaSuballocationType allocType,
-        uint32_t strategy,
-        VmaAllocationRequest* pAllocationRequest) override;
-
-    void Alloc(
-        const VmaAllocationRequest& request,
-        VmaSuballocationType type,
-        void* userData) override;
-
-    void Free(VmaAllocHandle allocHandle) override;
-    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-    VmaAllocHandle GetAllocationListBegin() const override;
-    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-    void Clear() override;
-    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-
-private:
-    static const size_t MAX_LEVELS = 48;
-
-    struct ValidationContext
-    {
-        size_t calculatedAllocationCount = 0;
-        size_t calculatedFreeCount = 0;
-        VkDeviceSize calculatedSumFreeSize = 0;
-    };
-    struct Node
-    {
-        VkDeviceSize offset;
-        enum TYPE
-        {
-            TYPE_FREE,
-            TYPE_ALLOCATION,
-            TYPE_SPLIT,
-            TYPE_COUNT
-        } type;
-        Node* parent;
-        Node* buddy;
-
-        union
-        {
-            struct
-            {
-                Node* prev;
-                Node* next;
-            } free;
-            struct
-            {
-                void* userData;
-            } allocation;
-            struct
-            {
-                Node* leftChild;
-            } split;
-        };
-    };
-
-    // Size of the memory block aligned down to a power of two.
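-    // For example, a block created with size 1000 gets m_UsableSize = 512 (VmaPrevPow2);
-    // LevelToNodeSize(level) is then simply m_UsableSize >> level.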
-    VkDeviceSize m_UsableSize;
-    uint32_t m_LevelCount;
-    VmaPoolAllocator<Node> m_NodeAllocator;
-    Node* m_Root;
-    struct
-    {
-        Node* front;
-        Node* back;
-    } m_FreeList[MAX_LEVELS];
-
-    // Number of nodes in the tree with type == TYPE_ALLOCATION.
-    size_t m_AllocationCount;
-    // Number of nodes in the tree with type == TYPE_FREE.
-    size_t m_FreeCount;
-    // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
-    // Doesn't include unusable size.
-    VkDeviceSize m_SumFreeSize;
-
-    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
-    VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
-
-    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
-    {
-        if (!IsVirtual())
-        {
-            size = VmaAlignUp(size, (VkDeviceSize)16);
-        }
-        return VmaNextPow2(size);
-    }
-    Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
-    void DeleteNodeChildren(Node* node);
-    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
-    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
-    void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
-    // Adds node to the front of FreeList at given level.
-    // node->type must be FREE.
-    // node->free.prev, next can be undefined.
-    void AddToFreeListFront(uint32_t level, Node* node);
-    // Removes node from FreeList at given level.
-    // node->type must be FREE.
-    // node->free.prev, next stay untouched.
-    void RemoveFromFreeList(uint32_t level, Node* node);
-    void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
-#endif
-};
-
-#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
-VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-    VkDeviceSize bufferImageGranularity, bool isVirtual)
-    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-    m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
-    m_Root(VMA_NULL),
-    m_AllocationCount(0),
-    m_FreeCount(1),
-    m_SumFreeSize(0)
-{
-    memset(m_FreeList, 0, sizeof(m_FreeList));
-}
-
-VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
-{
-    DeleteNodeChildren(m_Root);
-    m_NodeAllocator.Free(m_Root);
-}
-
-void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
-{
-    VmaBlockMetadata::Init(size);
-
-    m_UsableSize = VmaPrevPow2(size);
-    m_SumFreeSize = m_UsableSize;
-
-    // Calculate m_LevelCount.
-    const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
-    m_LevelCount = 1;
-    while (m_LevelCount < MAX_LEVELS &&
-        LevelToNodeSize(m_LevelCount) >= minNodeSize)
-    {
-        ++m_LevelCount;
-    }
-
-    Node* rootNode = m_NodeAllocator.Alloc();
-    rootNode->offset = 0;
-    rootNode->type = Node::TYPE_FREE;
-    rootNode->parent = VMA_NULL;
-    rootNode->buddy = VMA_NULL;
-
-    m_Root = rootNode;
-    AddToFreeListFront(0, rootNode);
-}
-
-bool VmaBlockMetadata_Buddy::Validate() const
-{
-    // Validate tree.
-    ValidationContext ctx;
-    if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
-    {
-        VMA_VALIDATE(false && "ValidateNode failed.");
-    }
-    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
-    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
-
-    // Validate free node lists.
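-    // Each per-level list must be a consistent doubly-linked list containing only
-    // TYPE_FREE nodes, with a head that has no predecessor.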
-    for (uint32_t level = 0; level < m_LevelCount; ++level)
-    {
-        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
-            m_FreeList[level].front->free.prev == VMA_NULL);
-
-        for (Node* node = m_FreeList[level].front;
-            node != VMA_NULL;
-            node = node->free.next)
-        {
-            VMA_VALIDATE(node->type == Node::TYPE_FREE);
-
-            if (node->free.next == VMA_NULL)
-            {
-                VMA_VALIDATE(m_FreeList[level].back == node);
-            }
-            else
-            {
-                VMA_VALIDATE(node->free.next->free.prev == node);
-            }
-        }
-    }
-
-    // Validate that free lists at higher levels are empty.
-    for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
-    {
-        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
-    }
-
-    return true;
-}
-
-void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-{
-    inoutStats.statistics.blockCount++;
-    inoutStats.statistics.blockBytes += GetSize();
-
-    AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
-
-    const VkDeviceSize unusableSize = GetUnusableSize();
-    if (unusableSize > 0)
-        VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
-}
-
-void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
-{
-    inoutStats.blockCount++;
-    inoutStats.allocationCount += (uint32_t)m_AllocationCount;
-    inoutStats.blockBytes += GetSize();
-    inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
-}
-
-#if VMA_STATS_STRING_ENABLED
-void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
-{
-    VmaDetailedStatistics stats;
-    VmaClearDetailedStatistics(stats);
-    AddDetailedStatistics(stats);
-
-    PrintDetailedMap_Begin(
-        json,
-        stats.statistics.blockBytes - stats.statistics.allocationBytes,
-        stats.statistics.allocationCount,
-        stats.unusedRangeCount,
-        mapRefCount);
-
-    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
-
-    const VkDeviceSize unusableSize = GetUnusableSize();
-    if (unusableSize > 0)
-    {
-        PrintDetailedMap_UnusedRange(json,
-            m_UsableSize, // offset
-            unusableSize); // size
-    }
-
-    PrintDetailedMap_End(json);
-}
-#endif // VMA_STATS_STRING_ENABLED
-
-bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-    VkDeviceSize allocSize,
-    VkDeviceSize allocAlignment,
-    bool upperAddress,
-    VmaSuballocationType allocType,
-    uint32_t strategy,
-    VmaAllocationRequest* pAllocationRequest)
-{
-    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-
-    allocSize = AlignAllocationSize(allocSize);
-
-    // Simple way to respect bufferImageGranularity. May be optimized some day.
-    // Whenever it might be an OPTIMAL image...
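-    // For example, with bufferImageGranularity = 1024, a 300-byte request of unknown
-    // or optimal-image type gets both its alignment and its size padded to 1024 below.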
-    if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-    {
-        allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
-        allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
-    }
-
-    if (allocSize > m_UsableSize)
-    {
-        return false;
-    }
-
-    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
-    for (uint32_t level = targetLevel; level--; )
-    {
-        for (Node* freeNode = m_FreeList[level].front;
-            freeNode != VMA_NULL;
-            freeNode = freeNode->free.next)
-        {
-            if (freeNode->offset % allocAlignment == 0)
-            {
-                pAllocationRequest->type = VmaAllocationRequestType::Normal;
-                pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
-                pAllocationRequest->size = allocSize;
-                pAllocationRequest->customData = (void*)(uintptr_t)level;
-                return true;
-            }
-        }
-    }
-
-    return false;
-}
-
-void VmaBlockMetadata_Buddy::Alloc(
-    const VmaAllocationRequest& request,
-    VmaSuballocationType type,
-    void* userData)
-{
-    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-
-    const uint32_t targetLevel = AllocSizeToLevel(request.size);
-    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
-
-    Node* currNode = m_FreeList[currLevel].front;
-    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
-    while (currNode->offset != offset)
-    {
-        currNode = currNode->free.next;
-        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-    }
-
-    // Go down, splitting free nodes.
-    while (currLevel < targetLevel)
-    {
-        // currNode is already first free node at currLevel.
-        // Remove it from list of free nodes at this currLevel.
-        RemoveFromFreeList(currLevel, currNode);
-
-        const uint32_t childrenLevel = currLevel + 1;
-
-        // Create two free sub-nodes.
-        Node* leftChild = m_NodeAllocator.Alloc();
-        Node* rightChild = m_NodeAllocator.Alloc();
-
-        leftChild->offset = currNode->offset;
-        leftChild->type = Node::TYPE_FREE;
-        leftChild->parent = currNode;
-        leftChild->buddy = rightChild;
-
-        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
-        rightChild->type = Node::TYPE_FREE;
-        rightChild->parent = currNode;
-        rightChild->buddy = leftChild;
-
-        // Convert current currNode to split type.
-        currNode->type = Node::TYPE_SPLIT;
-        currNode->split.leftChild = leftChild;
-
-        // Add child nodes to free list. Order is important!
-        AddToFreeListFront(childrenLevel, rightChild);
-        AddToFreeListFront(childrenLevel, leftChild);
-
-        ++m_FreeCount;
-        ++currLevel;
-        currNode = m_FreeList[currLevel].front;
-
-        /*
-        We can be sure that currNode, as left child of node previously split,
-        also fulfills the alignment requirement.
-        */
-    }
-
-    // Remove from free list.
-    VMA_ASSERT(currLevel == targetLevel &&
-        currNode != VMA_NULL &&
-        currNode->type == Node::TYPE_FREE);
-    RemoveFromFreeList(currLevel, currNode);
-
-    // Convert to allocation node.
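-    // (For example, a 64-byte request served from a 256-byte free node arrives here
-    // after two splits, 256 -> 128 + 128 -> 64 + 64, taking the left child each time.)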
-    currNode->type = Node::TYPE_ALLOCATION;
-    currNode->allocation.userData = userData;
-
-    ++m_AllocationCount;
-    --m_FreeCount;
-    m_SumFreeSize -= request.size;
-}
-
-void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-{
-    uint32_t level = 0;
-    outInfo.offset = (VkDeviceSize)allocHandle - 1;
-    const Node* const node = FindAllocationNode(outInfo.offset, level);
-    outInfo.size = LevelToNodeSize(level);
-    outInfo.pUserData = node->allocation.userData;
-}
-
-void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
-{
-    uint32_t level = 0;
-    const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-    return node->allocation.userData;
-}
-
-VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
-{
-    // Function only used for defragmentation, which is disabled for this algorithm
-    return VK_NULL_HANDLE;
-}
-
-VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
-{
-    // Function only used for defragmentation, which is disabled for this algorithm
-    return VK_NULL_HANDLE;
-}
-
-void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
-{
-    if (node->type == Node::TYPE_SPLIT)
-    {
-        DeleteNodeChildren(node->split.leftChild->buddy);
-        DeleteNodeChildren(node->split.leftChild);
-        const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
-        m_NodeAllocator.Free(node->split.leftChild->buddy);
-        m_NodeAllocator.Free(node->split.leftChild);
-    }
-}
-
-void VmaBlockMetadata_Buddy::Clear()
-{
-    DeleteNodeChildren(m_Root);
-    m_Root->type = Node::TYPE_FREE;
-    m_AllocationCount = 0;
-    m_FreeCount = 1;
-    m_SumFreeSize = m_UsableSize;
-}
-
-void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-{
-    uint32_t level = 0;
-    Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-    node->allocation.userData = userData;
-}
-
-VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
-{
-    Node* node = m_Root;
-    VkDeviceSize nodeOffset = 0;
-    outLevel = 0;
-    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
-    while (node->type == Node::TYPE_SPLIT)
-    {
-        const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
-        if (offset < nodeOffset + nextLevelNodeSize)
-        {
-            node = node->split.leftChild;
-        }
-        else
-        {
-            node = node->split.leftChild->buddy;
-            nodeOffset += nextLevelNodeSize;
-        }
-        ++outLevel;
-        levelNodeSize = nextLevelNodeSize;
-    }
-
-    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
-    return node;
-}
-
-bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
-{
-    VMA_VALIDATE(level < m_LevelCount);
-    VMA_VALIDATE(curr->parent == parent);
-    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
-    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
-    switch (curr->type)
-    {
-    case Node::TYPE_FREE:
-        // curr->free.prev, next are validated separately.
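-        // A free node contributes its whole level size to the expected free total.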
-        ctx.calculatedSumFreeSize += levelNodeSize;
-        ++ctx.calculatedFreeCount;
-        break;
-    case Node::TYPE_ALLOCATION:
-        ++ctx.calculatedAllocationCount;
-        if (!IsVirtual())
-        {
-            VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
-        }
-        break;
-    case Node::TYPE_SPLIT:
-    {
-        const uint32_t childrenLevel = level + 1;
-        const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
-        const Node* const leftChild = curr->split.leftChild;
-        VMA_VALIDATE(leftChild != VMA_NULL);
-        VMA_VALIDATE(leftChild->offset == curr->offset);
-        if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
-        {
-            VMA_VALIDATE(false && "ValidateNode for left child failed.");
-        }
-        const Node* const rightChild = leftChild->buddy;
-        VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
-        if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
-        {
-            VMA_VALIDATE(false && "ValidateNode for right child failed.");
-        }
-    }
-    break;
-    default:
-        return false;
-    }
-
-    return true;
-}
-
-uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
-{
-    // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
-    uint32_t level = 0;
-    VkDeviceSize currLevelNodeSize = m_UsableSize;
-    VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
-    while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
-    {
-        ++level;
-        currLevelNodeSize >>= 1;
-        nextLevelNodeSize >>= 1;
-    }
-    return level;
-}
-
-void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
-{
-    uint32_t level = 0;
-    Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-
-    ++m_FreeCount;
-    --m_AllocationCount;
-    m_SumFreeSize += LevelToNodeSize(level);
-
-    node->type = Node::TYPE_FREE;
-
-    // Join free nodes if possible.
-    while (level > 0 && node->buddy->type == Node::TYPE_FREE)
-    {
-        RemoveFromFreeList(level, node->buddy);
-        Node* const parent = node->parent;
-
-        m_NodeAllocator.Free(node->buddy);
-        m_NodeAllocator.Free(node);
-        parent->type = Node::TYPE_FREE;
-
-        node = parent;
-        --level;
-        --m_FreeCount;
-    }
-
-    AddToFreeListFront(level, node);
-}
-
-void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
-{
-    switch (node->type)
-    {
-    case Node::TYPE_FREE:
-        VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
-        break;
-    case Node::TYPE_ALLOCATION:
-        VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
-        break;
-    case Node::TYPE_SPLIT:
-    {
-        const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-        const Node* const leftChild = node->split.leftChild;
-        AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
-        const Node* const rightChild = leftChild->buddy;
-        AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
-    }
-    break;
-    default:
-        VMA_ASSERT(0);
-    }
-}
-
-void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
-{
-    VMA_ASSERT(node->type == Node::TYPE_FREE);
-
-    // List is empty.
-    Node* const frontNode = m_FreeList[level].front;
-    if (frontNode == VMA_NULL)
-    {
-        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
-        node->free.prev = node->free.next = VMA_NULL;
-        m_FreeList[level].front = m_FreeList[level].back = node;
-    }
-    else
-    {
-        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
-        node->free.prev = VMA_NULL;
-        node->free.next = frontNode;
-        frontNode->free.prev = node;
-        m_FreeList[level].front = node;
-    }
-}
-
-void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
-{
-    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
-
-    // It is at the front.
-    if (node->free.prev == VMA_NULL)
-    {
-        VMA_ASSERT(m_FreeList[level].front == node);
-        m_FreeList[level].front = node->free.next;
-    }
-    else
-    {
-        Node* const prevFreeNode = node->free.prev;
-        VMA_ASSERT(prevFreeNode->free.next == node);
-        prevFreeNode->free.next = node->free.next;
-    }
-
-    // It is at the back.
-    if (node->free.next == VMA_NULL)
-    {
-        VMA_ASSERT(m_FreeList[level].back == node);
-        m_FreeList[level].back = node->free.prev;
-    }
-    else
-    {
-        Node* const nextFreeNode = node->free.next;
-        VMA_ASSERT(nextFreeNode->free.prev == node);
-        nextFreeNode->free.prev = node->free.prev;
-    }
-}
-
-void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
-{
-    switch (node->type)
-    {
-    case Node::TYPE_FREE:
-        break;
-    case Node::TYPE_ALLOCATION:
-        DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
-        break;
-    case Node::TYPE_SPLIT:
-    {
-        ++level;
-        DebugLogAllAllocationNode(node->split.leftChild, level);
-        DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
-    }
-    break;
-    default:
-        VMA_ASSERT(0);
-    }
-}
-
-#if VMA_STATS_STRING_ENABLED
-void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
-{
-    switch (node->type)
-    {
-    case Node::TYPE_FREE:
-        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
-        break;
-    case Node::TYPE_ALLOCATION:
-        PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
-        break;
-    case Node::TYPE_SPLIT:
-    {
-        const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-        const Node* const leftChild = node->split.leftChild;
-        PrintDetailedMapNode(json, leftChild, childrenNodeSize);
-        const Node* const rightChild = leftChild->buddy;
-        PrintDetailedMapNode(json, rightChild, childrenNodeSize);
-    }
-    break;
-    default:
-        VMA_ASSERT(0);
-    }
-}
-#endif // VMA_STATS_STRING_ENABLED
-#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
-#endif // _VMA_BLOCK_METADATA_BUDDY
-#endif // #if 0
-
-#ifndef _VMA_BLOCK_METADATA_TLSF
-// To avoid searching the current, larger region when the first allocation attempt fails
-// and skip straight to a smaller range, use VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
-// as the strategy in CreateAllocationRequest(). When fragmentation and reuse of previous
-// blocks don't matter, use VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest
-// possible allocation time.
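-// Editorial sketch (not part of the original file): how TLSF maps an allocation size to
-// its two-level free-list bucket in O(1), assuming the same constants as the class below
-// (SECOND_LEVEL_INDEX = 5, MEMORY_CLASS_SHIFT = 7, SMALL_BUFFER_SIZE = 256) and a
-// non-virtual block. ExampleMsbIndex and ExampleTlsfIndices are hypothetical names.
-#if 0
-static uint8_t ExampleMsbIndex(uint64_t v) // portable stand-in for VMA_BITSCAN_MSB
-{
-    uint8_t i = 0;
-    while (v >>= 1)
-        ++i;
-    return i;
-}
-
-static void ExampleTlsfIndices(uint64_t size, uint8_t& memoryClass, uint16_t& secondIndex)
-{
-    // First level: one memory class per power of two above SMALL_BUFFER_SIZE.
-    memoryClass = size > 256 ? (uint8_t)(ExampleMsbIndex(size) - 7) : 0;
-    if (memoryClass == 0)
-        secondIndex = (uint16_t)((size - 1) / 64); // four 64-byte small-buffer lists
-    else
-        // Second level: the 5 bits below the MSB, with the MSB itself masked off.
-        secondIndex = (uint16_t)((size >> (memoryClass + 7 - 5)) ^ (1U << 5));
-    // e.g. size = 1000: memoryClass = 9 - 7 = 2, secondIndex = (1000 >> 4) ^ 32 = 30.
-}
-#endif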
-class VmaBlockMetadata_TLSF : public VmaBlockMetadata
-{
-    VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF)
-public:
-    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-        VkDeviceSize bufferImageGranularity, bool isVirtual);
-    virtual ~VmaBlockMetadata_TLSF();
-
-    size_t GetAllocationCount() const override { return m_AllocCount; }
-    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
-    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
-    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
-    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };
-
-    void Init(VkDeviceSize size) override;
-    bool Validate() const override;
-
-    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-    void AddStatistics(VmaStatistics& inoutStats) const override;
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMap(class VmaJsonWriter& json) const override;
-#endif
-
-    bool CreateAllocationRequest(
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        bool upperAddress,
-        VmaSuballocationType allocType,
-        uint32_t strategy,
-        VmaAllocationRequest* pAllocationRequest) override;
-
-    VkResult CheckCorruption(const void* pBlockData) override;
-    void Alloc(
-        const VmaAllocationRequest& request,
-        VmaSuballocationType type,
-        void* userData) override;
-
-    void Free(VmaAllocHandle allocHandle) override;
-    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-    VmaAllocHandle GetAllocationListBegin() const override;
-    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-    void Clear() override;
-    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-    void DebugLogAllAllocations() const override;
-
-private:
-    // According to the original paper it should preferably be 4 or 5:
-    // M. Masmano, I. Ripoll, A. Crespo, and J. Real, "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
-    // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
-    static const uint8_t SECOND_LEVEL_INDEX = 5;
-    static const uint16_t SMALL_BUFFER_SIZE = 256;
-    static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
-    static const uint8_t MEMORY_CLASS_SHIFT = 7;
-    static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
-
-    class Block
-    {
-    public:
-        VkDeviceSize offset;
-        VkDeviceSize size;
-        Block* prevPhysical;
-        Block* nextPhysical;
-
-        void MarkFree() { prevFree = VMA_NULL; }
-        void MarkTaken() { prevFree = this; }
-        bool IsFree() const { return prevFree != this; }
-        void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
-        Block*& PrevFree() { return prevFree; }
-        Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
-
-    private:
-        Block* prevFree; // Address of the same block here indicates that block is taken
-        union
-        {
-            Block* nextFree;
-            void* userData;
-        };
-    };
-
-    size_t m_AllocCount;
-    // Total number of free blocks besides null block
-    size_t m_BlocksFreeCount;
-    // Total size of free blocks excluding null block
-    VkDeviceSize m_BlocksFreeSize;
-    uint32_t m_IsFreeBitmap;
-    uint8_t m_MemoryClasses;
-    uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
-    uint32_t m_ListsCount;
-    /*
-    * 0: 0-3 lists for small buffers
-    * 1+: 0-(2^SLI-1) lists for normal buffers
-    */
-    Block** m_FreeList;
-    VmaPoolAllocator<Block> m_BlockAllocator;
-    Block* m_NullBlock;
-    VmaBlockBufferImageGranularity m_GranularityHandler;
-
-    uint8_t SizeToMemoryClass(VkDeviceSize size) const;
-    uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
-    uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
-    uint32_t GetListIndex(VkDeviceSize size) const;
-
-    void RemoveFreeBlock(Block* block);
-    void InsertFreeBlock(Block* block);
-    void MergeBlock(Block* block, Block* prev);
-
-    Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
-    bool CheckBlock(
-        Block& block,
-        uint32_t listIndex,
-        VkDeviceSize allocSize,
-        VkDeviceSize allocAlignment,
-        VmaSuballocationType allocType,
-        VmaAllocationRequest* pAllocationRequest);
-};
-
-#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
-VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-    VkDeviceSize bufferImageGranularity, bool isVirtual)
-    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-    m_AllocCount(0),
-    m_BlocksFreeCount(0),
-    m_BlocksFreeSize(0),
-    m_IsFreeBitmap(0),
-    m_MemoryClasses(0),
-    m_ListsCount(0),
-    m_FreeList(VMA_NULL),
-    m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
-    m_NullBlock(VMA_NULL),
-    m_GranularityHandler(bufferImageGranularity) {}
-
-VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
-{
-    if (m_FreeList)
-        vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
-    m_GranularityHandler.Destroy(GetAllocationCallbacks());
-}
-
-void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
-{
-    VmaBlockMetadata::Init(size);
-
-    if (!IsVirtual())
-        m_GranularityHandler.Init(GetAllocationCallbacks(), size);
-
-    m_NullBlock = m_BlockAllocator.Alloc();
-    m_NullBlock->size = size;
-    m_NullBlock->offset = 0;
-    m_NullBlock->prevPhysical = VMA_NULL;
-    m_NullBlock->nextPhysical = VMA_NULL;
-    m_NullBlock->MarkFree();
-    m_NullBlock->NextFree() = VMA_NULL;
-    m_NullBlock->PrevFree() = VMA_NULL;
-    uint8_t memoryClass = SizeToMemoryClass(size);
-    uint16_t sli = SizeToSecondIndex(size, memoryClass);
-    m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
-    if (IsVirtual())
-        m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
-    else
-        m_ListsCount += 4;
-
-    m_MemoryClasses = memoryClass + 2;
-    memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
-
-    m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
-    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
-}
-
-bool VmaBlockMetadata_TLSF::Validate() const
-{
-    VMA_VALIDATE(GetSumFreeSize() <= GetSize());
-
-    VkDeviceSize calculatedSize = m_NullBlock->size;
-    VkDeviceSize calculatedFreeSize = m_NullBlock->size;
-    size_t allocCount = 0;
-    size_t freeCount = 0;
-
-    // Check integrity of free lists
-    for (uint32_t list = 0; list < m_ListsCount; ++list)
-    {
-        Block* block = m_FreeList[list];
-        if (block != VMA_NULL)
-        {
-            VMA_VALIDATE(block->IsFree());
-            VMA_VALIDATE(block->PrevFree() == VMA_NULL);
-            while (block->NextFree())
-            {
-                VMA_VALIDATE(block->NextFree()->IsFree());
-                VMA_VALIDATE(block->NextFree()->PrevFree() == block);
-                block = block->NextFree();
-            }
-        }
-    }
-
-    VkDeviceSize nextOffset = m_NullBlock->offset;
-    auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
-
-    VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
-    if (m_NullBlock->prevPhysical)
-    {
-        VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
-    }
-    // Check all blocks
-    for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
-    {
-        VMA_VALIDATE(prev->offset + prev->size == nextOffset);
-        nextOffset = prev->offset;
-        calculatedSize += prev->size;
-
-        uint32_t listIndex = GetListIndex(prev->size);
-        if (prev->IsFree())
-        {
-            ++freeCount;
-            // Check if free block belongs to free list
-            Block* freeBlock = m_FreeList[listIndex];
-            VMA_VALIDATE(freeBlock != VMA_NULL);
-
-            bool found = false;
-            do
-            {
-                if (freeBlock == prev)
-                    found = true;
-
-                freeBlock = freeBlock->NextFree();
-            } while (!found && freeBlock != VMA_NULL);
-
-            VMA_VALIDATE(found);
-            calculatedFreeSize += prev->size;
-        }
-        else
-        {
-            ++allocCount;
-            // Check if taken block is not on a free list
-            Block* freeBlock = m_FreeList[listIndex];
-            while (freeBlock)
-            {
-                VMA_VALIDATE(freeBlock != prev);
-                freeBlock = freeBlock->NextFree();
-            }
-
-            if (!IsVirtual())
-            {
-                VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
-            }
-        }
-
-        if (prev->prevPhysical)
-        {
-            VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
-        }
-    }
-
-    if (!IsVirtual())
-    {
-        VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
-    }
-
-    VMA_VALIDATE(nextOffset == 0);
-    VMA_VALIDATE(calculatedSize == GetSize());
-    VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
-    VMA_VALIDATE(allocCount == m_AllocCount);
-    VMA_VALIDATE(freeCount == m_BlocksFreeCount);
-
-    return true;
-}
-
-void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-{
-    inoutStats.statistics.blockCount++;
-    inoutStats.statistics.blockBytes += GetSize();
-    if (m_NullBlock->size > 0)
-        VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
-
-    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-    {
-        if (block->IsFree())
-            VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
-        else
-            VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
-    }
-}
-
-void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
-{
-    inoutStats.blockCount++;
-    inoutStats.allocationCount += (uint32_t)m_AllocCount;
-    inoutStats.blockBytes += GetSize();
-    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
-}
-
-#if VMA_STATS_STRING_ENABLED
-void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
-{
-    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
-    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
-
-    size_t i = blockCount;
-    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-    {
-        blockList[--i] = block;
-    }
-    VMA_ASSERT(i == 0);
-
-    VmaDetailedStatistics stats;
-    VmaClearDetailedStatistics(stats);
-    AddDetailedStatistics(stats);
-
-    PrintDetailedMap_Begin(json,
-        stats.statistics.blockBytes - stats.statistics.allocationBytes,
-        stats.statistics.allocationCount,
-        stats.unusedRangeCount);
-
-    for (; i < blockCount; ++i)
-    {
-        Block* block = blockList[i];
-        if (block->IsFree())
-            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
-        else
-            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
-    }
-    if (m_NullBlock->size > 0)
-        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
-
-    PrintDetailedMap_End(json);
-}
-#endif
-
-bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
-    VkDeviceSize allocSize,
-    VkDeviceSize allocAlignment,
-    bool upperAddress,
-    VmaSuballocationType allocType,
-    uint32_t strategy,
-    VmaAllocationRequest* pAllocationRequest)
-{
-    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
-    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-
-    // For small granularity round up
-    if (!IsVirtual())
-        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
-
-    allocSize += GetDebugMargin();
-    // Quick check for too small pool
-    if (allocSize > GetSumFreeSize())
-        return false;
-
-    // If no free blocks in pool then check only null block
-    if (m_BlocksFreeCount == 0)
-        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
-
-    // Round up to the next block
-    VkDeviceSize sizeForNextList = allocSize;
-    VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
-    if (allocSize > SMALL_BUFFER_SIZE)
-    {
-        sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
-    }
-    else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
-        sizeForNextList = SMALL_BUFFER_SIZE + 1;
-    else
-        sizeForNextList += smallSizeStep;
-
-    uint32_t nextListIndex = 0;
-    uint32_t prevListIndex = 0;
-    Block* nextListBlock = VMA_NULL;
-    Block* prevListBlock = VMA_NULL;
-
-    // Check blocks according to strategies
-    if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
-    {
-        // Quick check for larger block first
-        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-        if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-            return true;
-
-        // If it did not fit, check the null block
-        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-            return true;
-
-        // Null block failed, search larger bucket
-        while (nextListBlock)
-        {
-            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            nextListBlock = nextListBlock->NextFree();
-        }
-
-        // Failed again, check best fit bucket
-        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-        while (prevListBlock)
-        {
-            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            prevListBlock = prevListBlock->NextFree();
-        }
-    }
-    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
-    {
-        // Check best fit bucket
-        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-        while (prevListBlock)
-        {
-            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            prevListBlock = prevListBlock->NextFree();
-        }
-
-        // If failed check null block
-        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-            return true;
-
-        // Check larger bucket
-        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-        while (nextListBlock)
-        {
-            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            nextListBlock = nextListBlock->NextFree();
-        }
-    }
-    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)
-    {
-        // Perform search from the start
-        VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-        VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
-
-        size_t i = m_BlocksFreeCount;
-        for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-        {
-            if (block->IsFree() && block->size >= allocSize)
-                blockList[--i] = block;
-        }
-
-        for (; i < m_BlocksFreeCount; ++i)
-        {
-            Block& block = *blockList[i];
-            if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-        }
-
-        // If failed check null block
-        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-            return true;
-
-        // Whole range searched, no more memory
-        return false;
-    }
-    else
-    {
-        // Check larger bucket
-        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-        while (nextListBlock)
-        {
-            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            nextListBlock = nextListBlock->NextFree();
-        }
-
-        // If failed check null block
-        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-            return true;
-
-        // Check best fit bucket
-        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-        while (prevListBlock)
-        {
-            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            prevListBlock = prevListBlock->NextFree();
-        }
-    }
-
-    // Worst case, full search has to be done
-    while (++nextListIndex < m_ListsCount)
-    {
-        nextListBlock = m_FreeList[nextListIndex];
-        while (nextListBlock)
-        {
-            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-                return true;
-            nextListBlock = nextListBlock->NextFree();
-        }
-    }
-
-    // No more memory sadly
-    return false;
-}
-
-VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
-{
-    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-    {
-        if (!block->IsFree())
-        {
-            if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
-            {
-                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-                return VK_ERROR_UNKNOWN_COPY;
-            }
-        }
-    }
-
-    return VK_SUCCESS;
-}
-
-void VmaBlockMetadata_TLSF::Alloc(
-    const VmaAllocationRequest& request,
-    VmaSuballocationType type,
-    void* userData)
-{
-    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
-
-    // Get block and pop it from the free list
-    Block* currentBlock = (Block*)request.allocHandle;
-    VkDeviceSize offset = request.algorithmData;
-    VMA_ASSERT(currentBlock != VMA_NULL);
-    VMA_ASSERT(currentBlock->offset <= offset);
-
-    if (currentBlock != m_NullBlock)
-        RemoveFreeBlock(currentBlock);
-
-    VkDeviceSize debugMargin = GetDebugMargin();
-    VkDeviceSize missingAlignment = offset - currentBlock->offset;
-
-    // Append missing alignment to prev block or create new one
-    if (missingAlignment)
-    {
-        Block* prevBlock = currentBlock->prevPhysical;
-        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
-
-        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
-        {
-            uint32_t oldList = GetListIndex(prevBlock->size);
-            prevBlock->size += missingAlignment;
-            // Check if new size crosses list bucket
-            if (oldList != GetListIndex(prevBlock->size))
-            {
-                prevBlock->size -= missingAlignment;
-                RemoveFreeBlock(prevBlock);
-                prevBlock->size += missingAlignment;
-                InsertFreeBlock(prevBlock);
-            }
-            else
-                m_BlocksFreeSize += missingAlignment;
-        }
-        else
-        {
-            Block* newBlock = m_BlockAllocator.Alloc();
-            currentBlock->prevPhysical = newBlock;
-            prevBlock->nextPhysical = newBlock;
-            newBlock->prevPhysical = prevBlock;
-            newBlock->nextPhysical = currentBlock;
-            newBlock->size = missingAlignment;
-            newBlock->offset = currentBlock->offset;
-            newBlock->MarkTaken();
-
-            InsertFreeBlock(newBlock);
-        }
-
-        currentBlock->size -= missingAlignment;
-        currentBlock->offset += missingAlignment;
-    }
-
-    VkDeviceSize size = request.size + debugMargin;
-    if (currentBlock->size == size)
-    {
-        if (currentBlock == m_NullBlock)
-        {
-            // Setup new null block
-            m_NullBlock = m_BlockAllocator.Alloc();
-            m_NullBlock->size = 0;
-            m_NullBlock->offset = currentBlock->offset + size;
-            m_NullBlock->prevPhysical = currentBlock;
-            m_NullBlock->nextPhysical = VMA_NULL;
-            m_NullBlock->MarkFree();
-            m_NullBlock->PrevFree() = VMA_NULL;
-            m_NullBlock->NextFree() = VMA_NULL;
-            currentBlock->nextPhysical = m_NullBlock;
-            currentBlock->MarkTaken();
-        }
-    }
-    else
-    {
-        VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
-
-        // Create new free block
-        Block* newBlock = m_BlockAllocator.Alloc();
-        newBlock->size = currentBlock->size - size;
-        newBlock->offset = currentBlock->offset + size;
-        newBlock->prevPhysical = currentBlock;
-        newBlock->nextPhysical = currentBlock->nextPhysical;
-        currentBlock->nextPhysical = newBlock;
-        currentBlock->size = size;
-
-        if (currentBlock == m_NullBlock)
-        {
-            m_NullBlock = newBlock;
-            m_NullBlock->MarkFree();
-            m_NullBlock->NextFree() = VMA_NULL;
-            m_NullBlock->PrevFree() = VMA_NULL;
-            currentBlock->MarkTaken();
-        }
-        else
-        {
-            newBlock->nextPhysical->prevPhysical = newBlock;
-            newBlock->MarkTaken();
-            InsertFreeBlock(newBlock);
-        }
-    }
-    currentBlock->UserData() = userData;
-
-    if (debugMargin > 0)
-    {
-        currentBlock->size -= debugMargin;
-        Block* newBlock = m_BlockAllocator.Alloc();
-        newBlock->size = debugMargin;
-        newBlock->offset = currentBlock->offset + currentBlock->size;
-        newBlock->prevPhysical = currentBlock;
-        newBlock->nextPhysical = currentBlock->nextPhysical;
-        newBlock->MarkTaken();
-        currentBlock->nextPhysical->prevPhysical = newBlock;
-        currentBlock->nextPhysical = newBlock;
-        InsertFreeBlock(newBlock);
-    }
-
-    if (!IsVirtual())
-        m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
-            currentBlock->offset, currentBlock->size);
-    ++m_AllocCount;
-}
-
-void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
-{
-    Block* block = (Block*)allocHandle;
-    Block* next = block->nextPhysical;
-    VMA_ASSERT(!block->IsFree() && "Block is already free!");
-
-    if (!IsVirtual())
-        m_GranularityHandler.FreePages(block->offset, block->size);
-    --m_AllocCount;
-
-    VkDeviceSize debugMargin = GetDebugMargin();
-    if (debugMargin > 0)
-    {
-        RemoveFreeBlock(next);
-        MergeBlock(next, block);
-        block = next;
-        next = next->nextPhysical;
-    }
-
-    // Try merging
-    Block* prev = block->prevPhysical;
-    if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
-    {
-        RemoveFreeBlock(prev);
-        MergeBlock(block, prev);
-    }
-
-    if (!next->IsFree())
-        InsertFreeBlock(block);
-    else if (next == m_NullBlock)
-        MergeBlock(m_NullBlock, block);
-    else
-    {
-        RemoveFreeBlock(next);
-        MergeBlock(next, block);
-        InsertFreeBlock(next);
-    }
-}
-
-void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-{
-    Block* block = (Block*)allocHandle;
-    VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
-    outInfo.offset = block->offset;
-    outInfo.size = block->size;
-    outInfo.pUserData = block->UserData();
-}
-
-void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
-{
-    Block* block = (Block*)allocHandle;
-    VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
-    return block->UserData();
-}
-
-VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
-{
-    if (m_AllocCount == 0)
-        return VK_NULL_HANDLE;
-
-    for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
-    {
-        if (!block->IsFree())
-            return (VmaAllocHandle)block;
-    }
-    VMA_ASSERT(false && "If m_AllocCount > 0 then there must be some allocation!");
-    return VK_NULL_HANDLE;
-}
-
-VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
-{
-    Block* startBlock = (Block*)prevAlloc;
-    VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
-
-    for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
-    {
-        if (!block->IsFree())
-            return (VmaAllocHandle)block;
-    }
-    return VK_NULL_HANDLE;
-}
-
-VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
-{
-    Block* block = (Block*)alloc;
-    VMA_ASSERT(!block->IsFree() && "Incorrect block!");
-
-    if (block->prevPhysical)
-        return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
-    return 0;
-}
-
-void VmaBlockMetadata_TLSF::Clear()
-{
-    m_AllocCount = 0;
-    m_BlocksFreeCount = 0;
-    m_BlocksFreeSize = 0;
-    m_IsFreeBitmap = 0;
-    m_NullBlock->offset = 0;
-    m_NullBlock->size = GetSize();
-    Block* block = m_NullBlock->prevPhysical;
-    m_NullBlock->prevPhysical = VMA_NULL;
-    while (block)
-    {
-        Block* prev = block->prevPhysical;
-        m_BlockAllocator.Free(block);
-        block = prev;
-    }
-    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
-    memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
-    m_GranularityHandler.Clear();
-}
-
-void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-{
-    Block* block = (Block*)allocHandle;
-    VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
-    block->UserData() = userData;
-}
-
-void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
-{
-    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-        if (!block->IsFree())
-            DebugLogAllocation(block->offset, block->size, block->UserData());
-}
-
-uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
-{
-    if (size > SMALL_BUFFER_SIZE)
-        return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT;
-    return 0;
-}
-
-uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
-{
-    if (memoryClass == 0)
-    {
-        if (IsVirtual())
-            return static_cast<uint16_t>((size - 1) / 8);
-        else
-            return static_cast<uint16_t>((size - 1) / 64);
-    }
-    return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
-}
-
-uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
-{
-    if (memoryClass == 0)
-        return secondIndex;
-
-    const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
-    if (IsVirtual())
-        return index + (1 << SECOND_LEVEL_INDEX);
-    else
-        return index + 4;
-}
-
-uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
-{
-    uint8_t memoryClass = SizeToMemoryClass(size);
-    return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
-}
-
-void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
-{
-    VMA_ASSERT(block != m_NullBlock);
-    VMA_ASSERT(block->IsFree());
-
-    if (block->NextFree() != VMA_NULL)
-        block->NextFree()->PrevFree() = block->PrevFree();
-    if (block->PrevFree() != VMA_NULL)
-        block->PrevFree()->NextFree() = block->NextFree();
-    else
-    {
-        uint8_t memClass = SizeToMemoryClass(block->size);
-        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-        uint32_t index = GetListIndex(memClass, secondIndex);
-        VMA_ASSERT(m_FreeList[index] == block);
-        m_FreeList[index] = block->NextFree();
-        if (block->NextFree() == VMA_NULL)
-        {
-            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
-            if (m_InnerIsFreeBitmap[memClass] == 0)
-                m_IsFreeBitmap &= ~(1UL << memClass);
-        }
-    }
-    block->MarkTaken();
-    block->UserData() = VMA_NULL;
-    --m_BlocksFreeCount;
-    m_BlocksFreeSize -= block->size;
-}
-
-void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
-{
-    VMA_ASSERT(block != m_NullBlock);
-    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
-
-    uint8_t memClass = SizeToMemoryClass(block->size);
-    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-    uint32_t index = GetListIndex(memClass, secondIndex);
-    VMA_ASSERT(index < m_ListsCount);
-    block->PrevFree() = VMA_NULL;
-    block->NextFree() = m_FreeList[index];
-    m_FreeList[index] = block;
-    if (block->NextFree() != VMA_NULL)
-        block->NextFree()->PrevFree() = block;
-    else
-    {
-        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
-        m_IsFreeBitmap |= 1UL << memClass;
-    }
-    ++m_BlocksFreeCount;
-    m_BlocksFreeSize += block->size;
-}
-
-void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
-{
-    VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
-    VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
-
-    block->offset = prev->offset;
-    block->size += prev->size;
-    block->prevPhysical = prev->prevPhysical;
-    if (block->prevPhysical)
-        block->prevPhysical->nextPhysical = block;
-    m_BlockAllocator.Free(prev);
-}
-
-VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
-{
-    uint8_t memoryClass = SizeToMemoryClass(size);
-    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
-    if (!innerFreeMap)
-    {
-        // Check higher levels for available blocks
-        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
-        if (!freeMap)
-            return VMA_NULL; // No more memory available
-
-        // Find lowest free region
-        memoryClass = VMA_BITSCAN_LSB(freeMap);
-        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
-        VMA_ASSERT(innerFreeMap != 0);
-    }
-    // Find lowest free subregion
-    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
-    VMA_ASSERT(m_FreeList[listIndex]);
-    return m_FreeList[listIndex];
-}
-
-bool VmaBlockMetadata_TLSF::CheckBlock(
-    Block& block,
-    uint32_t listIndex,
-    VkDeviceSize allocSize,
-    VkDeviceSize allocAlignment,
-    VmaSuballocationType allocType,
-    VmaAllocationRequest* pAllocationRequest)
-{
-    VMA_ASSERT(block.IsFree() && "Block is already taken!");
-
-    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
-    if (block.size < allocSize + alignedOffset - block.offset)
-        return false;
-
-    // Check for granularity conflicts
-    if (!IsVirtual() &&
-        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
-        return false;
-
-    // Alloc successful
-    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
-    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
-    pAllocationRequest->size = allocSize - GetDebugMargin();
-    pAllocationRequest->customData = (void*)allocType;
-    pAllocationRequest->algorithmData = alignedOffset;
-
-    // Place block at the start of list if it's normal block
-    if (listIndex != m_ListsCount && block.PrevFree())
-    {
-        block.PrevFree()->NextFree() = block.NextFree();
-        if (block.NextFree())
-            block.NextFree()->PrevFree() = block.PrevFree();
-        block.PrevFree() = VMA_NULL;
-        block.NextFree() = m_FreeList[listIndex];
-        m_FreeList[listIndex] = &block;
-        if (block.NextFree())
-            block.NextFree()->PrevFree() = &block;
-    }
-
-    return true;
-}
-#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
-#endif // _VMA_BLOCK_METADATA_TLSF
-
-#ifndef _VMA_BLOCK_VECTOR
-/*
-Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
-Vulkan memory type.
-
-Synchronized internally with a mutex.
-*/
-class VmaBlockVector
-{
-    friend struct VmaDefragmentationContext_T;
-    VMA_CLASS_NO_COPY(VmaBlockVector)
-public:
-    VmaBlockVector(
-        VmaAllocator hAllocator,
-        VmaPool hParentPool,
-        uint32_t memoryTypeIndex,
-        VkDeviceSize preferredBlockSize,
-        size_t minBlockCount,
-        size_t maxBlockCount,
-        VkDeviceSize bufferImageGranularity,
-        bool explicitBlockSize,
-        uint32_t algorithm,
-        float priority,
-        VkDeviceSize minAllocationAlignment,
-        void* pMemoryAllocateNext);
-    ~VmaBlockVector();
-
-    VmaAllocator GetAllocator() const { return m_hAllocator; }
-    VmaPool GetParentPool() const { return m_hParentPool; }
-    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
-    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
-    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-    uint32_t GetAlgorithm() const { return m_Algorithm; }
-    bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
-    float GetPriority() const { return m_Priority; }
-    const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
-    // To be used only while the m_Mutex is locked. Used during defragmentation.
-    size_t GetBlockCount() const { return m_Blocks.size(); }
-    // To be used only while the m_Mutex is locked. Used during defragmentation.
-    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
-    VMA_RW_MUTEX& GetMutex() { return m_Mutex; }
-
-    VkResult CreateMinBlocks();
-    void AddStatistics(VmaStatistics& inoutStats);
-    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
-    bool IsEmpty();
-    bool IsCorruptionDetectionEnabled() const;
-
-    VkResult Allocate(
-        VkDeviceSize size,
-        VkDeviceSize alignment,
-        const VmaAllocationCreateInfo& createInfo,
-        VmaSuballocationType suballocType,
-        size_t allocationCount,
-        VmaAllocation* pAllocations);
-
-    void Free(const VmaAllocation hAllocation);
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMap(class VmaJsonWriter& json);
-#endif
-
-    VkResult CheckCorruption();
-
-private:
-    const VmaAllocator m_hAllocator;
-    const VmaPool m_hParentPool;
-    const uint32_t m_MemoryTypeIndex;
-    const VkDeviceSize m_PreferredBlockSize;
-    const size_t m_MinBlockCount;
-    const size_t m_MaxBlockCount;
-    const VkDeviceSize m_BufferImageGranularity;
-    const bool m_ExplicitBlockSize;
-    const uint32_t m_Algorithm;
-    const float m_Priority;
-    const VkDeviceSize m_MinAllocationAlignment;
-
-    void* const m_pMemoryAllocateNext;
-    VMA_RW_MUTEX m_Mutex;
-    // Incrementally sorted by sumFreeSize, ascending.
-    VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
-    uint32_t m_NextBlockId;
-    bool m_IncrementalSort = true;
-
-    void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
-
-    VkDeviceSize CalcMaxBlockSize() const;
-    // Finds and removes given block from vector.
-    void Remove(VmaDeviceMemoryBlock* pBlock);
-    // Performs single step in sorting m_Blocks. They may not be fully sorted
-    // after this call.
- void IncrementallySortBlocks(); - void SortByFreeSize(); - - VkResult AllocatePage( - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation); - - VkResult AllocateFromBlock( - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation* pAllocation); - - VkResult CommitAllocationRequest( - VmaAllocationRequest& allocRequest, - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation); - - VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); - bool HasEmptyBlock(); -}; -#endif // _VMA_BLOCK_VECTOR - -#ifndef _VMA_DEFRAGMENTATION_CONTEXT -struct VmaDefragmentationContext_T -{ - VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) -public: - VmaDefragmentationContext_T( - VmaAllocator hAllocator, - const VmaDefragmentationInfo& info); - ~VmaDefragmentationContext_T(); - - void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } - - VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); - VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); - -private: - // Max number of allocations to ignore due to size constraints before ending single pass - static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; - enum class CounterStatus { Pass, Ignore, End }; - - struct FragmentedBlock - { - uint32_t data; - VmaDeviceMemoryBlock* block; - }; - struct StateBalanced - { - VkDeviceSize avgFreeSize = 0; - VkDeviceSize avgAllocSize = UINT64_MAX; - }; - struct StateExtensive - { - enum class Operation : uint8_t - { - FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, - MoveBuffers, MoveTextures, MoveAll, - Cleanup, Done - }; - - Operation operation = Operation::FindFreeBlockTexture; - size_t firstFreeBlock = SIZE_MAX; - }; - struct MoveAllocationData - { - VkDeviceSize size; - VkDeviceSize alignment; - VmaSuballocationType type; - VmaAllocationCreateFlags flags; - VmaDefragmentationMove move = {}; - }; - - const VkDeviceSize m_MaxPassBytes; - const uint32_t m_MaxPassAllocations; - - VmaStlAllocator m_MoveAllocator; - VmaVector> m_Moves; - - uint8_t m_IgnoredAllocs = 0; - uint32_t m_Algorithm; - uint32_t m_BlockVectorCount; - VmaBlockVector* m_PoolBlockVector; - VmaBlockVector** m_pBlockVectors; - size_t m_ImmovableBlockCount = 0; - VmaDefragmentationStats m_GlobalStats = { 0 }; - VmaDefragmentationStats m_PassStats = { 0 }; - void* m_AlgorithmState = VMA_NULL; - - static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); - CounterStatus CheckCounters(VkDeviceSize bytes); - bool IncrementCounters(VkDeviceSize bytes); - bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); - bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); - - bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); - bool ComputeDefragmentation_Fast(VmaBlockVector& vector); - bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); - bool ComputeDefragmentation_Full(VmaBlockVector& vector); - bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); - - void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); - bool 
MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent); -}; -#endif // _VMA_DEFRAGMENTATION_CONTEXT - -#ifndef _VMA_POOL_T -struct VmaPool_T -{ - friend struct VmaPoolListItemTraits; - VMA_CLASS_NO_COPY(VmaPool_T) -public: - VmaBlockVector m_BlockVector; - VmaDedicatedAllocationList m_DedicatedAllocations; - - VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, - VkDeviceSize preferredBlockSize); - ~VmaPool_T(); - - uint32_t GetId() const { return m_Id; } - void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } - - const char* GetName() const { return m_Name; } - void SetName(const char* pName); - -#if VMA_STATS_STRING_ENABLED - //void PrintDetailedMap(class VmaStringBuilder& sb); -#endif - -private: - uint32_t m_Id; - char* m_Name; - VmaPool_T* m_PrevPool = VMA_NULL; - VmaPool_T* m_NextPool = VMA_NULL; -}; - -struct VmaPoolListItemTraits -{ - typedef VmaPool_T ItemType; - - static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } - static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } - static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } - static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } -}; -#endif // _VMA_POOL_T - -#ifndef _VMA_CURRENT_BUDGET_DATA -struct VmaCurrentBudgetData -{ - VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; - -#if VMA_MEMORY_BUDGET - VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; - VMA_RW_MUTEX m_BudgetMutex; - uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; - uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; - uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; -#endif // VMA_MEMORY_BUDGET - - VmaCurrentBudgetData(); - - void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); - void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); -}; - -#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS -VmaCurrentBudgetData::VmaCurrentBudgetData() -{ - for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) - { - m_BlockCount[heapIndex] = 0; - m_AllocationCount[heapIndex] = 0; - m_BlockBytes[heapIndex] = 0; - m_AllocationBytes[heapIndex] = 0; -#if VMA_MEMORY_BUDGET - m_VulkanUsage[heapIndex] = 0; - m_VulkanBudget[heapIndex] = 0; - m_BlockBytesAtBudgetFetch[heapIndex] = 0; -#endif - } - -#if VMA_MEMORY_BUDGET - m_OperationsSinceBudgetFetch = 0; -#endif -} - -void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) -{ - m_AllocationBytes[heapIndex] += allocationSize; - ++m_AllocationCount[heapIndex]; -#if VMA_MEMORY_BUDGET - ++m_OperationsSinceBudgetFetch; -#endif -} - -void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) -{ - VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); - m_AllocationBytes[heapIndex] -= allocationSize; - VMA_ASSERT(m_AllocationCount[heapIndex] > 0); - --m_AllocationCount[heapIndex]; -#if VMA_MEMORY_BUDGET - ++m_OperationsSinceBudgetFetch; -#endif -} -#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS -#endif // _VMA_CURRENT_BUDGET_DATA - -#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR -/* -Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. 
-*/
-class VmaAllocationObjectAllocator
-{
- VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
-public:
- VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
- : m_Allocator(pAllocationCallbacks, 1024) {}
-
- template<typename... Types> VmaAllocation Allocate(Types&&... args);
- void Free(VmaAllocation hAlloc);
-
-private:
- VMA_MUTEX m_Mutex;
- VmaPoolAllocator<VmaAllocation_T> m_Allocator;
-};
-
-template<typename... Types>
-VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
-{
- VmaMutexLock mutexLock(m_Mutex);
- return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
-}
-
-void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
-{
- VmaMutexLock mutexLock(m_Mutex);
- m_Allocator.Free(hAlloc);
-}
-#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
-
-#ifndef _VMA_VIRTUAL_BLOCK_T
-struct VmaVirtualBlock_T
-{
- VMA_CLASS_NO_COPY(VmaVirtualBlock_T)
-public:
- const bool m_AllocationCallbacksSpecified;
- const VkAllocationCallbacks m_AllocationCallbacks;
-
- VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
- ~VmaVirtualBlock_T();
-
- VkResult Init() { return VK_SUCCESS; }
- bool IsEmpty() const { return m_Metadata->IsEmpty(); }
- void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
- void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
- void Clear() { m_Metadata->Clear(); }
-
- const VkAllocationCallbacks* GetAllocationCallbacks() const;
- void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
- VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
- VkDeviceSize* outOffset);
- void GetStatistics(VmaStatistics& outStats) const;
- void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
-#if VMA_STATS_STRING_ENABLED
- void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
-#endif
-
-private:
- VmaBlockMetadata* m_Metadata;
-};
-
-#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
-VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
- : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
- m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
-{
- const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
- switch (algorithm)
- {
- default:
- VMA_ASSERT(0);
- case 0:
- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
- break;
- case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
- break;
- }
-
- m_Metadata->Init(createInfo.size);
-}
-
-VmaVirtualBlock_T::~VmaVirtualBlock_T()
-{
- // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
- if (!m_Metadata->IsEmpty())
- m_Metadata->DebugLogAllAllocations();
- // This is the most important assert in the entire library.
- // Hitting it means you have some memory leak - unreleased virtual allocations.
- VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
-
- vma_delete(GetAllocationCallbacks(), m_Metadata);
-}
-
-const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
-{
- return m_AllocationCallbacksSpecified ?
&m_AllocationCallbacks : VMA_NULL; -} - -void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) -{ - m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); -} - -VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, - VkDeviceSize* outOffset) -{ - VmaAllocationRequest request = {}; - if (m_Metadata->CreateAllocationRequest( - createInfo.size, // allocSize - VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment - (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress - VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant - createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy - &request)) - { - m_Metadata->Alloc(request, - VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant - createInfo.pUserData); - outAllocation = (VmaVirtualAllocation)request.allocHandle; - if(outOffset) - *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); - return VK_SUCCESS; - } - outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; - if (outOffset) - *outOffset = UINT64_MAX; - return VK_ERROR_OUT_OF_DEVICE_MEMORY; -} - -void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const -{ - VmaClearStatistics(outStats); - m_Metadata->AddStatistics(outStats); -} - -void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const -{ - VmaClearDetailedStatistics(outStats); - m_Metadata->AddDetailedStatistics(outStats); -} - -#if VMA_STATS_STRING_ENABLED -void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const -{ - VmaJsonWriter json(GetAllocationCallbacks(), sb); - json.BeginObject(); - - VmaDetailedStatistics stats; - CalculateDetailedStatistics(stats); - - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats); - - if (detailedMap) - { - json.WriteString("Details"); - json.BeginObject(); - m_Metadata->PrintDetailedMap(json); - json.EndObject(); - } - - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS -#endif // _VMA_VIRTUAL_BLOCK_T - - -// Main allocator object. -struct VmaAllocator_T -{ - VMA_CLASS_NO_COPY(VmaAllocator_T) -public: - bool m_UseMutex; - uint32_t m_VulkanApiVersion; - bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). - bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). - bool m_UseExtMemoryBudget; - bool m_UseAmdDeviceCoherentMemory; - bool m_UseKhrBufferDeviceAddress; - bool m_UseExtMemoryPriority; - VkDevice m_hDevice; - VkInstance m_hInstance; - bool m_AllocationCallbacksSpecified; - VkAllocationCallbacks m_AllocationCallbacks; - VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; - VmaAllocationObjectAllocator m_AllocationObjectAllocator; - - // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. - uint32_t m_HeapSizeLimitMask; - - VkPhysicalDeviceProperties m_PhysicalDeviceProperties; - VkPhysicalDeviceMemoryProperties m_MemProps; - - // Default pools. - VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; - VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; - - VmaCurrentBudgetData m_Budget; - VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. 
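- // A minimal sketch (hypothetical caller code) of how the m_Budget counters kept
- // above surface through the public API; `allocator` and `heapCount` are assumed
- // to exist in the caller:
- //   VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
- //   vmaGetHeapBudgets(allocator, budgets);
- //   for (uint32_t h = 0; h < heapCount; ++h)
- //       printf("heap %u: %llu used of %llu budget\n", h,
- //           (unsigned long long)budgets[h].usage,
- //           (unsigned long long)budgets[h].budget);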
-
- VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
- VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
- ~VmaAllocator_T();
-
- const VkAllocationCallbacks* GetAllocationCallbacks() const
- {
- return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
- }
- const VmaVulkanFunctions& GetVulkanFunctions() const
- {
- return m_VulkanFunctions;
- }
-
- VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
-
- VkDeviceSize GetBufferImageGranularity() const
- {
- return VMA_MAX(
- static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
- m_PhysicalDeviceProperties.limits.bufferImageGranularity);
- }
-
- uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
- uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
-
- uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
- {
- VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
- return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
- }
- // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
- bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
- {
- return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
- }
- // Minimum alignment for all allocations in specific memory type.
- VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
- {
- return IsMemoryTypeNonCoherent(memTypeIndex) ?
- VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
- (VkDeviceSize)VMA_MIN_ALIGNMENT;
- }
-
- bool IsIntegratedGpu() const
- {
- return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
- }
-
- uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
-
- void GetBufferMemoryRequirements(
- VkBuffer hBuffer,
- VkMemoryRequirements& memReq,
- bool& requiresDedicatedAllocation,
- bool& prefersDedicatedAllocation) const;
- void GetImageMemoryRequirements(
- VkImage hImage,
- VkMemoryRequirements& memReq,
- bool& requiresDedicatedAllocation,
- bool& prefersDedicatedAllocation) const;
- VkResult FindMemoryTypeIndex(
- uint32_t memoryTypeBits,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
- uint32_t* pMemoryTypeIndex) const;
-
- // Main allocation function.
- VkResult AllocateMemory(
- const VkMemoryRequirements& vkMemReq,
- bool requiresDedicatedAllocation,
- bool prefersDedicatedAllocation,
- VkBuffer dedicatedBuffer,
- VkImage dedicatedImage,
- VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
- const VmaAllocationCreateInfo& createInfo,
- VmaSuballocationType suballocType,
- size_t allocationCount,
- VmaAllocation* pAllocations);
-
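- // A minimal sketch (hypothetical caller code) of the memory-type search that
- // FindMemoryTypeIndex implements, via its public wrapper; `allocator` and
- // `memReq` are assumed to exist in the caller:
- //   VmaAllocationCreateInfo createInfo = {};
- //   createInfo.usage = VMA_MEMORY_USAGE_AUTO;
- //   uint32_t memTypeIndex;
- //   vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);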
- // Main deallocation function.
- void FreeMemory(
- size_t allocationCount,
- const VmaAllocation* pAllocations);
-
- void CalculateStatistics(VmaTotalStatistics* pStats);
-
- void GetHeapBudgets(
- VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
-
-#if VMA_STATS_STRING_ENABLED
- void PrintDetailedMap(class VmaJsonWriter& json);
-#endif
-
- void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
-
- VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
- void DestroyPool(VmaPool pool);
- void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
- void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
-
- void SetCurrentFrameIndex(uint32_t frameIndex);
- uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
-
- VkResult CheckPoolCorruption(VmaPool hPool);
- VkResult CheckCorruption(uint32_t memoryTypeBits);
-
- // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
- VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
- // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
- void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
- // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
- VkResult BindVulkanBuffer(
- VkDeviceMemory memory,
- VkDeviceSize memoryOffset,
- VkBuffer buffer,
- const void* pNext);
- // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
- VkResult BindVulkanImage(
- VkDeviceMemory memory,
- VkDeviceSize memoryOffset,
- VkImage image,
- const void* pNext);
-
- VkResult Map(VmaAllocation hAllocation, void** ppData);
- void Unmap(VmaAllocation hAllocation);
-
- VkResult BindBufferMemory(
- VmaAllocation hAllocation,
- VkDeviceSize allocationLocalOffset,
- VkBuffer hBuffer,
- const void* pNext);
- VkResult BindImageMemory(
- VmaAllocation hAllocation,
- VkDeviceSize allocationLocalOffset,
- VkImage hImage,
- const void* pNext);
-
- VkResult FlushOrInvalidateAllocation(
- VmaAllocation hAllocation,
- VkDeviceSize offset, VkDeviceSize size,
- VMA_CACHE_OPERATION op);
- VkResult FlushOrInvalidateAllocations(
- uint32_t allocationCount,
- const VmaAllocation* allocations,
- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
- VMA_CACHE_OPERATION op);
-
- void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
-
- /*
- Returns bit mask of memory types that can support defragmentation on GPU as
- they support creation of required buffer for copy operations.
- */
- uint32_t GetGpuDefragmentationMemoryTypeBits();
-
-#if VMA_EXTERNAL_MEMORY
- VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
- {
- return m_TypeExternalMemoryHandleTypes[memTypeIndex];
- }
-#endif // #if VMA_EXTERNAL_MEMORY
-
-private:
- VkDeviceSize m_PreferredLargeHeapBlockSize;
-
- VkPhysicalDevice m_PhysicalDevice;
- VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
- VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
-#if VMA_EXTERNAL_MEMORY
- VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
-#endif // #if VMA_EXTERNAL_MEMORY
-
- VMA_RW_MUTEX m_PoolsMutex;
- typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
- // Protected by m_PoolsMutex.
- PoolList m_Pools;
- uint32_t m_NextPoolId;
-
- VmaVulkanFunctions m_VulkanFunctions;
-
- // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
- uint32_t m_GlobalMemoryTypeBits; - - void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); - -#if VMA_STATIC_VULKAN_FUNCTIONS == 1 - void ImportVulkanFunctions_Static(); -#endif - - void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); - -#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - void ImportVulkanFunctions_Dynamic(); -#endif - - void ValidateVulkanFunctions(); - - VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); - - VkResult AllocateMemoryOfType( - VmaPool pool, - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedPreferred, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, - size_t allocationCount, - VmaAllocation* pAllocations); - - // Helper function only to be used inside AllocateDedicatedMemory. - VkResult AllocateDedicatedMemoryPage( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, - bool map, - bool isUserDataString, - bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation); - - // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. - VkResult AllocateDedicatedMemory( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - bool isMappingAllowed, - bool canAliasMemory, - void* pUserData, - float priority, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain = nullptr); - - void FreeDedicatedMemory(const VmaAllocation allocation); - - VkResult CalcMemTypeParams( - VmaAllocationCreateInfo& outCreateInfo, - uint32_t memTypeIndex, - VkDeviceSize size, - size_t allocationCount); - VkResult CalcAllocationParams( - VmaAllocationCreateInfo& outCreateInfo, - bool dedicatedRequired, - bool dedicatedPreferred); - - /* - Calculates and returns bit mask of memory types that can support defragmentation - on GPU as they support creation of required buffer for copy operations. 
- */
- uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
- uint32_t CalculateGlobalMemoryTypeBits() const;
-
- bool GetFlushOrInvalidateRange(
- VmaAllocation allocation,
- VkDeviceSize offset, VkDeviceSize size,
- VkMappedMemoryRange& outRange) const;
-
-#if VMA_MEMORY_BUDGET
- void UpdateVulkanBudget();
-#endif // #if VMA_MEMORY_BUDGET
-};
-
-
-#ifndef _VMA_MEMORY_FUNCTIONS
-static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
-{
- return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
-}
-
-static void VmaFree(VmaAllocator hAllocator, void* ptr)
-{
- VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
-}
-
-template<typename T>
-static T* VmaAllocate(VmaAllocator hAllocator)
-{
- return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
-}
-
-template<typename T>
-static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
-{
- return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
-}
-
-template<typename T>
-static void vma_delete(VmaAllocator hAllocator, T* ptr)
-{
- if(ptr != VMA_NULL)
- {
- ptr->~T();
- VmaFree(hAllocator, ptr);
- }
-}
-
-template<typename T>
-static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
-{
- if(ptr != VMA_NULL)
- {
- for(size_t i = count; i--; )
- ptr[i].~T();
- VmaFree(hAllocator, ptr);
- }
-}
-#endif // _VMA_MEMORY_FUNCTIONS
-
-#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
- : m_pMetadata(VMA_NULL),
- m_MemoryTypeIndex(UINT32_MAX),
- m_Id(0),
- m_hMemory(VK_NULL_HANDLE),
- m_MapCount(0),
- m_pMappedData(VMA_NULL) {}
-
-VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
-{
- VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-}
-
-void VmaDeviceMemoryBlock::Init(
- VmaAllocator hAllocator,
- VmaPool hParentPool,
- uint32_t newMemoryTypeIndex,
- VkDeviceMemory newMemory,
- VkDeviceSize newSize,
- uint32_t id,
- uint32_t algorithm,
- VkDeviceSize bufferImageGranularity)
-{
- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-
- m_hParentPool = hParentPool;
- m_MemoryTypeIndex = newMemoryTypeIndex;
- m_Id = id;
- m_hMemory = newMemory;
-
- switch (algorithm)
- {
- case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
- bufferImageGranularity, false); // isVirtual
- break;
- default:
- VMA_ASSERT(0);
- // Fall-through.
- case 0:
- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
- bufferImageGranularity, false); // isVirtual
- }
- m_pMetadata->Init(newSize);
-}
-
-void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
-{
- // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
- if (!m_pMetadata->IsEmpty())
- m_pMetadata->DebugLogAllAllocations();
- // This is the most important assert in the entire library.
- // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
- VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); - - VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); - allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); - m_hMemory = VK_NULL_HANDLE; - - vma_delete(allocator, m_pMetadata); - m_pMetadata = VMA_NULL; -} - -void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) -{ - if(m_MappingHysteresis.PostFree()) - { - VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); - if (m_MapCount == 0) - { - m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); - } - } -} - -bool VmaDeviceMemoryBlock::Validate() const -{ - VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && - (m_pMetadata->GetSize() != 0)); - - return m_pMetadata->Validate(); -} - -VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) -{ - void* pData = nullptr; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - res = m_pMetadata->CheckCorruption(pData); - - Unmap(hAllocator, 1); - - return res; -} - -VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) -{ - if (count == 0) - { - return VK_SUCCESS; - } - - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); - m_MappingHysteresis.PostMap(); - if (oldTotalMapCount != 0) - { - m_MapCount += count; - VMA_ASSERT(m_pMappedData != VMA_NULL); - if (ppData != VMA_NULL) - { - *ppData = m_pMappedData; - } - return VK_SUCCESS; - } - else - { - VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - &m_pMappedData); - if (result == VK_SUCCESS) - { - if (ppData != VMA_NULL) - { - *ppData = m_pMappedData; - } - m_MapCount = count; - } - return result; - } -} - -void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) -{ - if (count == 0) - { - return; - } - - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - if (m_MapCount >= count) - { - m_MapCount -= count; - const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); - if (totalMapCount == 0) - { - m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); - } - m_MappingHysteresis.PostUnmap(); - } - else - { - VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); - } -} - -VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) -{ - VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - - void* pData; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - VmaWriteMagicValue(pData, allocOffset + allocSize); - - Unmap(hAllocator, 1); - return VK_SUCCESS; -} - -VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) -{ - VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - - void* pData; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); - } - - 
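- // The margin/magic machinery exercised above is compiled in only when the
- // library is configured for it; a minimal sketch of the defines a user would
- // set before including the implementation (values here are illustrative):
- //   #define VMA_DEBUG_MARGIN 16
- //   #define VMA_DEBUG_DETECT_CORRUPTION 1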
Unmap(hAllocator, 1); - return VK_SUCCESS; -} - -VkResult VmaDeviceMemoryBlock::BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext) -{ - VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); - VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); - const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; - // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); -} - -VkResult VmaDeviceMemoryBlock::BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext) -{ - VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); - VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); - const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; - // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); -} -#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS - -#ifndef _VMA_ALLOCATION_T_FUNCTIONS -VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) - : m_Alignment{ 1 }, - m_Size{ 0 }, - m_pUserData{ VMA_NULL }, - m_pName{ VMA_NULL }, - m_MemoryTypeIndex{ 0 }, - m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, - m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, - m_MapCount{ 0 }, - m_Flags{ 0 } -{ - if(mappingAllowed) - m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; - -#if VMA_STATS_STRING_ENABLED - m_BufferImageUsage = 0; -#endif -} - -VmaAllocation_T::~VmaAllocation_T() -{ - VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); - - // Check if owned string was freed. - VMA_ASSERT(m_pName == VMA_NULL); -} - -void VmaAllocation_T::InitBlockAllocation( - VmaDeviceMemoryBlock* block, - VmaAllocHandle allocHandle, - VkDeviceSize alignment, - VkDeviceSize size, - uint32_t memoryTypeIndex, - VmaSuballocationType suballocationType, - bool mapped) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); - VMA_ASSERT(block != VMA_NULL); - m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; - m_Alignment = alignment; - m_Size = size; - m_MemoryTypeIndex = memoryTypeIndex; - if(mapped) - { - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; - } - m_SuballocationType = (uint8_t)suballocationType; - m_BlockAllocation.m_Block = block; - m_BlockAllocation.m_AllocHandle = allocHandle; -} - -void VmaAllocation_T::InitDedicatedAllocation( - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceMemory hMemory, - VmaSuballocationType suballocationType, - void* pMappedData, - VkDeviceSize size) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); - VMA_ASSERT(hMemory != VK_NULL_HANDLE); - m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; - m_Alignment = 0; - m_Size = size; - m_MemoryTypeIndex = memoryTypeIndex; - m_SuballocationType = (uint8_t)suballocationType; - if(pMappedData != VMA_NULL) - { - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; - } - m_DedicatedAllocation.m_hParentPool = hParentPool; - m_DedicatedAllocation.m_hMemory = hMemory; - m_DedicatedAllocation.m_pMappedData = pMappedData; - m_DedicatedAllocation.m_Prev = VMA_NULL; - m_DedicatedAllocation.m_Next = VMA_NULL; -} - -void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) -{ - VMA_ASSERT(pName == VMA_NULL || pName != m_pName); - - FreeName(hAllocator); - - if (pName != VMA_NULL) - m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); -} - -uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) -{ - VMA_ASSERT(allocation != VMA_NULL); - VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); - VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); - - if (m_MapCount != 0) - m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); - - m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); - VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); - m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); - -#if VMA_STATS_STRING_ENABLED - VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); -#endif - return m_MapCount; -} - -VmaAllocHandle VmaAllocation_T::GetAllocHandle() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_AllocHandle; - case ALLOCATION_TYPE_DEDICATED: - return VK_NULL_HANDLE; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -VkDeviceSize VmaAllocation_T::GetOffset() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); - case ALLOCATION_TYPE_DEDICATED: - return 0; - default: - VMA_ASSERT(0); - return 0; - } -} - -VmaPool VmaAllocation_T::GetParentPool() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetParentPool(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_hParentPool; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -VkDeviceMemory VmaAllocation_T::GetMemory() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetDeviceMemory(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_hMemory; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -void* VmaAllocation_T::GetMappedData() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - if (m_MapCount != 0 || 
IsPersistentMap()) - { - void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); - VMA_ASSERT(pBlockData != VMA_NULL); - return (char*)pBlockData + GetOffset(); - } - else - { - return VMA_NULL; - } - break; - case ALLOCATION_TYPE_DEDICATED: - VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); - return m_DedicatedAllocation.m_pMappedData; - default: - VMA_ASSERT(0); - return VMA_NULL; - } -} - -void VmaAllocation_T::BlockAllocMap() -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - - if (m_MapCount < 0xFF) - { - ++m_MapCount; - } - else - { - VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); - } -} - -void VmaAllocation_T::BlockAllocUnmap() -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - - if (m_MapCount > 0) - { - --m_MapCount; - } - else - { - VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); - } -} - -VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - - if (m_MapCount != 0 || IsPersistentMap()) - { - if (m_MapCount < 0xFF) - { - VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); - *ppData = m_DedicatedAllocation.m_pMappedData; - ++m_MapCount; - return VK_SUCCESS; - } - else - { - VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); - return VK_ERROR_MEMORY_MAP_FAILED; - } - } - else - { - VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - ppData); - if (result == VK_SUCCESS) - { - m_DedicatedAllocation.m_pMappedData = *ppData; - m_MapCount = 1; - } - return result; - } -} - -void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - - if (m_MapCount > 0) - { - --m_MapCount; - if (m_MapCount == 0 && !IsPersistentMap()) - { - m_DedicatedAllocation.m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory); - } - } - else - { - VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage) -{ - VMA_ASSERT(m_BufferImageUsage == 0); - m_BufferImageUsage = bufferImageUsage; -} - -void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const -{ - json.WriteString("Type"); - json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); - - json.WriteString("Size"); - json.WriteNumber(m_Size); - json.WriteString("Usage"); - json.WriteNumber(m_BufferImageUsage); - - if (m_pUserData != VMA_NULL) - { - json.WriteString("CustomData"); - json.BeginString(); - json.ContinueString_Pointer(m_pUserData); - json.EndString(); - } - if (m_pName != VMA_NULL) - { - json.WriteString("Name"); - json.WriteString(m_pName); - } -} -#endif // VMA_STATS_STRING_ENABLED - -void VmaAllocation_T::FreeName(VmaAllocator hAllocator) -{ - if(m_pName) - { - VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); - m_pName = VMA_NULL; - } -} -#endif // 
_VMA_ALLOCATION_T_FUNCTIONS
-
-#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
-VmaBlockVector::VmaBlockVector(
- VmaAllocator hAllocator,
- VmaPool hParentPool,
- uint32_t memoryTypeIndex,
- VkDeviceSize preferredBlockSize,
- size_t minBlockCount,
- size_t maxBlockCount,
- VkDeviceSize bufferImageGranularity,
- bool explicitBlockSize,
- uint32_t algorithm,
- float priority,
- VkDeviceSize minAllocationAlignment,
- void* pMemoryAllocateNext)
- : m_hAllocator(hAllocator),
- m_hParentPool(hParentPool),
- m_MemoryTypeIndex(memoryTypeIndex),
- m_PreferredBlockSize(preferredBlockSize),
- m_MinBlockCount(minBlockCount),
- m_MaxBlockCount(maxBlockCount),
- m_BufferImageGranularity(bufferImageGranularity),
- m_ExplicitBlockSize(explicitBlockSize),
- m_Algorithm(algorithm),
- m_Priority(priority),
- m_MinAllocationAlignment(minAllocationAlignment),
- m_pMemoryAllocateNext(pMemoryAllocateNext),
- m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
- m_NextBlockId(0) {}
-
-VmaBlockVector::~VmaBlockVector()
-{
- for (size_t i = m_Blocks.size(); i--; )
- {
- m_Blocks[i]->Destroy(m_hAllocator);
- vma_delete(m_hAllocator, m_Blocks[i]);
- }
-}
-
-VkResult VmaBlockVector::CreateMinBlocks()
-{
- for (size_t i = 0; i < m_MinBlockCount; ++i)
- {
- VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
- if (res != VK_SUCCESS)
- {
- return res;
- }
- }
- return VK_SUCCESS;
-}
-
-void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
-{
- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
- const size_t blockCount = m_Blocks.size();
- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
- {
- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
- VMA_ASSERT(pBlock);
- VMA_HEAVY_ASSERT(pBlock->Validate());
- pBlock->m_pMetadata->AddStatistics(inoutStats);
- }
-}
-
-void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
-{
- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-
- const size_t blockCount = m_Blocks.size();
- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
- {
- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
- VMA_ASSERT(pBlock);
- VMA_HEAVY_ASSERT(pBlock->Validate());
- pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
- }
-}
-
-bool VmaBlockVector::IsEmpty()
-{
- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
- return m_Blocks.empty();
-}
-
-bool VmaBlockVector::IsCorruptionDetectionEnabled() const
-{
- const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
- return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
- (VMA_DEBUG_MARGIN > 0) &&
- (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
-}
-
-VkResult VmaBlockVector::Allocate(
- VkDeviceSize size,
- VkDeviceSize alignment,
- const VmaAllocationCreateInfo& createInfo,
- VmaSuballocationType suballocType,
- size_t allocationCount,
- VmaAllocation* pAllocations)
-{
- size_t allocIndex;
- VkResult res = VK_SUCCESS;
-
- alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
-
- if (IsCorruptionDetectionEnabled())
- {
- size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
- alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
- }
-
- {
- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
- for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
- {
- res = AllocatePage(
- size,
- alignment,
- createInfo,
- suballocType,
- pAllocations + allocIndex);
- if (res != VK_SUCCESS)
- {
- break;
- }
- }
- }
-
- if (res != VK_SUCCESS)
- {
- // Free all already created allocations.
- while (allocIndex--)
- Free(pAllocations[allocIndex]);
- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
- }
-
- return res;
-}
-
-VkResult VmaBlockVector::AllocatePage(
- VkDeviceSize size,
- VkDeviceSize alignment,
- const VmaAllocationCreateInfo& createInfo,
- VmaSuballocationType suballocType,
- VmaAllocation* pAllocation)
-{
- const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-
- VkDeviceSize freeMemory;
- {
- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
- VmaBudget heapBudget = {};
- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
- freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
- }
-
- const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
- (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
- const bool canCreateNewBlock =
- ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
- (m_Blocks.size() < m_MaxBlockCount) &&
- (freeMemory >= size || !canFallbackToDedicated);
- uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
-
- // Upper address can only be used with linear allocator and within single memory block.
- if (isUpperAddress &&
- (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
- {
- return VK_ERROR_FEATURE_NOT_PRESENT;
- }
-
- // Early reject: requested allocation size is larger than maximum block size for this block vector.
- if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
- {
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
-
- // 1. Search existing allocations. Try to allocate.
- if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
- {
- // Use only last block.
- if (!m_Blocks.empty())
- {
- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
- VMA_ASSERT(pCurrBlock);
- VkResult res = AllocateFromBlock(
- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
- if (res == VK_SUCCESS)
- {
- VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
- IncrementallySortBlocks();
- return VK_SUCCESS;
- }
- }
- }
- else
- {
- if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
- {
- const bool isHostVisible =
- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
- if(isHostVisible)
- {
- const bool isMappingAllowed = (createInfo.flags &
- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
- /*
- For non-mappable allocations, check blocks that are not mapped first.
- For mappable allocations, check blocks that are already mapped first.
- This way, having many blocks, we will separate mappable and non-mappable allocations,
- hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
- */
- for(size_t mappingI = 0; mappingI < 2; ++mappingI)
- {
- // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; - if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) - { - VkResult res = AllocateFromBlock( - pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - } - else - { - // Forward order in m_Blocks - prefer blocks with smallest amount of free space. - for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT - { - // Backward order in m_Blocks - prefer blocks with largest amount of free space. - for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - - // 2. Try to create new block. - if (canCreateNewBlock) - { - // Calculate optimal size for new block. - VkDeviceSize newBlockSize = m_PreferredBlockSize; - uint32_t newBlockSizeShift = 0; - const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; - - if (!m_ExplicitBlockSize) - { - // Allocate 1/8, 1/4, 1/2 as first blocks. - const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); - for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) - { - const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) - { - newBlockSize = smallerNewBlockSize; - ++newBlockSizeShift; - } - else - { - break; - } - } - } - - size_t newBlockIndex = 0; - VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? - CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; - // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. - if (!m_ExplicitBlockSize) - { - while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) - { - const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize >= size) - { - newBlockSize = smallerNewBlockSize; - ++newBlockSizeShift; - res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
- else
- {
- break;
- }
- }
- }
-
- if (res == VK_SUCCESS)
- {
- VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
- VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
-
- res = AllocateFromBlock(
- pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
- if (res == VK_SUCCESS)
- {
- VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
- IncrementallySortBlocks();
- return VK_SUCCESS;
- }
- else
- {
- // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
- }
- }
-
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-}
-
-void VmaBlockVector::Free(const VmaAllocation hAllocation)
-{
- VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
-
- bool budgetExceeded = false;
- {
- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
- VmaBudget heapBudget = {};
- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
- budgetExceeded = heapBudget.usage >= heapBudget.budget;
- }
-
- // Scope for lock.
- {
- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-
- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-
- if (IsCorruptionDetectionEnabled())
- {
- VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
- VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
- }
-
- if (hAllocation->IsPersistentMap())
- {
- pBlock->Unmap(m_hAllocator, 1);
- }
-
- const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
- pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
- pBlock->PostFree(m_hAllocator);
- VMA_HEAVY_ASSERT(pBlock->Validate());
-
- VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
-
- const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
- // pBlock became empty after this deallocation.
- if (pBlock->m_pMetadata->IsEmpty())
- {
- // Already had empty block. We don't want to have two, so delete this one.
- if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
- {
- pBlockToDelete = pBlock;
- Remove(pBlock);
- }
- // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
- }
- // pBlock didn't become empty, but we have another empty block - find and free that one.
- // (This is optional, heuristics.)
- else if (hadEmptyBlockBeforeFree && canDeleteBlock)
- {
- VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
- if (pLastBlock->m_pMetadata->IsEmpty())
- {
- pBlockToDelete = pLastBlock;
- m_Blocks.pop_back();
- }
- }
-
- IncrementallySortBlocks();
- }
-
- // Destruction of a free block. Deferred until this point, outside of mutex
- // lock, for performance reasons.
- if (pBlockToDelete != VMA_NULL) - { - VMA_DEBUG_LOG(" Deleted empty block #%u", pBlockToDelete->GetId()); - pBlockToDelete->Destroy(m_hAllocator); - vma_delete(m_hAllocator, pBlockToDelete); - } - - m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); - m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); -} - -VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const -{ - VkDeviceSize result = 0; - for (size_t i = m_Blocks.size(); i--; ) - { - result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); - if (result >= m_PreferredBlockSize) - { - break; - } - } - return result; -} - -void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) -{ - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - if (m_Blocks[blockIndex] == pBlock) - { - VmaVectorRemove(m_Blocks, blockIndex); - return; - } - } - VMA_ASSERT(0); -} - -void VmaBlockVector::IncrementallySortBlocks() -{ - if (!m_IncrementalSort) - return; - if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) - { - // Bubble sort only until first swap. - for (size_t i = 1; i < m_Blocks.size(); ++i) - { - if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) - { - VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); - return; - } - } - } -} - -void VmaBlockVector::SortByFreeSize() -{ - VMA_SORT(m_Blocks.begin(), m_Blocks.end(), - [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool - { - return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); - }); -} - -VkResult VmaBlockVector::AllocateFromBlock( - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation* pAllocation) -{ - const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; - - VmaAllocationRequest currRequest = {}; - if (pBlock->m_pMetadata->CreateAllocationRequest( - size, - alignment, - isUpperAddress, - suballocType, - strategy, - &currRequest)) - { - return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); - } - return VK_ERROR_OUT_OF_DEVICE_MEMORY; -} - -VkResult VmaBlockVector::CommitAllocationRequest( - VmaAllocationRequest& allocRequest, - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation) -{ - const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; - const bool isMappingAllowed = (allocFlags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; - - pBlock->PostAlloc(); - // Allocate from pCurrBlock. - if (mapped) - { - VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); - if (res != VK_SUCCESS) - { - return res; - } - } - - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); - pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); - (*pAllocation)->InitBlockAllocation( - pBlock, - allocRequest.allocHandle, - alignment, - allocRequest.size, // Not size, as actual allocation size may be larger than requested! 
- m_MemoryTypeIndex, - suballocType, - mapped); - VMA_HEAVY_ASSERT(pBlock->Validate()); - if (isUserDataString) - (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); - else - (*pAllocation)->SetUserData(m_hAllocator, pUserData); - m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); - } - if (IsCorruptionDetectionEnabled()) - { - VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); - VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); - } - return VK_SUCCESS; -} - -VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) -{ - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; - allocInfo.pNext = m_pMemoryAllocateNext; - allocInfo.memoryTypeIndex = m_MemoryTypeIndex; - allocInfo.allocationSize = blockSize; - -#if VMA_BUFFER_DEVICE_ADDRESS - // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; - if (m_hAllocator->m_UseKhrBufferDeviceAddress) - { - allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; - VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); - } -#endif // VMA_BUFFER_DEVICE_ADDRESS - -#if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; - if (m_hAllocator->m_UseExtMemoryPriority) - { - VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); - priorityInfo.priority = m_Priority; - VmaPnextChainPushFront(&allocInfo, &priorityInfo); - } -#endif // VMA_MEMORY_PRIORITY - -#if VMA_EXTERNAL_MEMORY - // Attach VkExportMemoryAllocateInfoKHR if necessary. - VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; - exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); - if (exportMemoryAllocInfo.handleTypes != 0) - { - VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); - } -#endif // VMA_EXTERNAL_MEMORY - - VkDeviceMemory mem = VK_NULL_HANDLE; - VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); - if (res < 0) - { - return res; - } - - // New VkDeviceMemory successfully created. - - // Create new Allocation for it. 
- VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); - pBlock->Init( - m_hAllocator, - m_hParentPool, - m_MemoryTypeIndex, - mem, - allocInfo.allocationSize, - m_NextBlockId++, - m_Algorithm, - m_BufferImageGranularity); - - m_Blocks.push_back(pBlock); - if (pNewBlockIndex != VMA_NULL) - { - *pNewBlockIndex = m_Blocks.size() - 1; - } - - return VK_SUCCESS; -} - -bool VmaBlockVector::HasEmptyBlock() -{ - for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; - if (pBlock->m_pMetadata->IsEmpty()) - { - return true; - } - } - return false; -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) -{ - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - - - json.BeginObject(); - for (size_t i = 0; i < m_Blocks.size(); ++i) - { - json.BeginString(); - json.ContinueString(m_Blocks[i]->GetId()); - json.EndString(); - - json.BeginObject(); - json.WriteString("MapRefCount"); - json.WriteNumber(m_Blocks[i]->GetMapRefCount()); - - m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); - json.EndObject(); - } - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED - -VkResult VmaBlockVector::CheckCorruption() -{ - if (!IsCorruptionDetectionEnabled()) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pBlock); - VkResult res = pBlock->CheckCorruption(m_hAllocator); - if (res != VK_SUCCESS) - { - return res; - } - } - return VK_SUCCESS; -} - -#endif // _VMA_BLOCK_VECTOR_FUNCTIONS - -#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS -VmaDefragmentationContext_T::VmaDefragmentationContext_T( - VmaAllocator hAllocator, - const VmaDefragmentationInfo& info) - : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), - m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
UINT32_MAX : info.maxAllocationsPerPass),
-    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
-    m_Moves(m_MoveAllocator)
-{
-    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
-
-    if (info.pool != VMA_NULL)
-    {
-        m_BlockVectorCount = 1;
-        m_PoolBlockVector = &info.pool->m_BlockVector;
-        m_pBlockVectors = &m_PoolBlockVector;
-        m_PoolBlockVector->SetIncrementalSort(false);
-        m_PoolBlockVector->SortByFreeSize();
-    }
-    else
-    {
-        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
-        m_PoolBlockVector = VMA_NULL;
-        m_pBlockVectors = hAllocator->m_pBlockVectors;
-        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-        {
-            VmaBlockVector* vector = m_pBlockVectors[i];
-            if (vector != VMA_NULL)
-            {
-                vector->SetIncrementalSort(false);
-                vector->SortByFreeSize();
-            }
-        }
-    }
-
-    switch (m_Algorithm)
-    {
-    case 0: // Default algorithm
-        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-    {
-        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
-        break;
-    }
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-    {
-        if (hAllocator->GetBufferImageGranularity() > 1)
-        {
-            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
-        }
-        break;
-    }
-    }
-}
-
-VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
-{
-    if (m_PoolBlockVector != VMA_NULL)
-    {
-        m_PoolBlockVector->SetIncrementalSort(true);
-    }
-    else
-    {
-        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-        {
-            VmaBlockVector* vector = m_pBlockVectors[i];
-            if (vector != VMA_NULL)
-                vector->SetIncrementalSort(true);
-        }
-    }
-
-    if (m_AlgorithmState)
-    {
-        switch (m_Algorithm)
-        {
-        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
-            break;
-        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
-            break;
-        default:
-            VMA_ASSERT(0);
-        }
-    }
-}
-
-VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
-{
-    if (m_PoolBlockVector != VMA_NULL)
-    {
-        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
-
-        if (m_PoolBlockVector->GetBlockCount() > 1)
-            ComputeDefragmentation(*m_PoolBlockVector, 0);
-        else if (m_PoolBlockVector->GetBlockCount() == 1)
-            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
-    }
-    else
-    {
-        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-        {
-            if (m_pBlockVectors[i] != VMA_NULL)
-            {
-                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
-
-                if (m_pBlockVectors[i]->GetBlockCount() > 1)
-                {
-                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
-                        break;
-                }
-                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
-                {
-                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
-                        break;
-                }
-            }
-        }
-    }
-
-    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
-    if (moveInfo.moveCount > 0)
-    {
-        moveInfo.pMoves = m_Moves.data();
-        return VK_INCOMPLETE;
-    }
-
-    moveInfo.pMoves = VMA_NULL;
-    return VK_SUCCESS;
-}
-
-VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
-{
-    VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
-
-    VkResult result = VK_SUCCESS;
-    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
-    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
-    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
-
-    VmaAllocator allocator = VMA_NULL;
-    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
-    {
-        VmaDefragmentationMove& move = moveInfo.pMoves[i];
-        size_t prevCount = 0, currentCount = 0;
-        VkDeviceSize freedBlockSize = 0;
-
-        uint32_t vectorIndex;
-        VmaBlockVector* vector;
-        if (m_PoolBlockVector != VMA_NULL)
-        {
-            vectorIndex = 0;
-            vector = m_PoolBlockVector;
-        }
-        else
-        {
-            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
-            vector = m_pBlockVectors[vectorIndex];
-            VMA_ASSERT(vector != VMA_NULL);
-        }
-
-        switch (move.operation)
-        {
-        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
-        {
-            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
-            if (mapCount > 0)
-            {
-                allocator = vector->m_hAllocator;
-                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
-                bool notPresent = true;
-                for (FragmentedBlock& block : mappedBlocks)
-                {
-                    if (block.block == newMapBlock)
-                    {
-                        notPresent = false;
-                        block.data += mapCount;
-                        break;
-                    }
-                }
-                if (notPresent)
-                    mappedBlocks.push_back({ mapCount, newMapBlock });
-            }
-
-            // Scope for locks, Free has its own lock
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                prevCount = vector->GetBlockCount();
-                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-            }
-            vector->Free(move.dstTmpAllocation);
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                currentCount = vector->GetBlockCount();
-            }
-
-            result = VK_INCOMPLETE;
-            break;
-        }
-        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
-        {
-            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-            --m_PassStats.allocationsMoved;
-            vector->Free(move.dstTmpAllocation);
-
-            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
-            bool notPresent = true;
-            for (const FragmentedBlock& block : immovableBlocks)
-            {
-                if (block.block == newBlock)
-                {
-                    notPresent = false;
-                    break;
-                }
-            }
-            if (notPresent)
-                immovableBlocks.push_back({ vectorIndex, newBlock });
-            break;
-        }
-        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
-        {
-            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-            --m_PassStats.allocationsMoved;
-            // Scope for locks, Free has its own lock
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                prevCount = vector->GetBlockCount();
-                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
-            }
-            vector->Free(move.srcAllocation);
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                currentCount = vector->GetBlockCount();
-            }
-            freedBlockSize *= prevCount - currentCount;
-
-            VkDeviceSize dstBlockSize;
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-            }
-            vector->Free(move.dstTmpAllocation);
-            {
-                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
-                currentCount = vector->GetBlockCount();
-            }
-
-            result = VK_INCOMPLETE;
-            break;
-        }
-        default:
-            VMA_ASSERT(0);
-        }
-
-        if (prevCount > currentCount)
-        {
-            size_t freedBlocks = prevCount - currentCount;
-            m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
-            m_PassStats.bytesFreed += freedBlockSize;
-        }
-
-        switch (m_Algorithm)
-        {
-        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-        {
-            if (m_AlgorithmState != VMA_NULL)
-            {
-                // Avoid unnecessary tries to allocate when new free block is available
-                StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
-                if (state.firstFreeBlock != SIZE_MAX)
-                {
-                    const size_t diff = prevCount - currentCount;
-                    if (state.firstFreeBlock >= diff)
-                    {
-                        state.firstFreeBlock -= diff;
-                        if (state.firstFreeBlock != 0)
-                            state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
-                    }
-                    else
-                        state.firstFreeBlock = 0;
-                }
-            }
-        }
-        }
-    }
-    moveInfo.moveCount = 0;
-    moveInfo.pMoves = VMA_NULL;
-    m_Moves.clear();
-
-    // Update stats
-    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
-    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
-    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
-    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
-    m_PassStats = { 0 };
-
-    // Move blocks with immovable allocations according to algorithm
-    if (immovableBlocks.size() > 0)
-    {
-        switch (m_Algorithm)
-        {
-        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-        {
-            if (m_AlgorithmState != VMA_NULL)
-            {
-                bool swapped = false;
-                // Move to the start of free blocks range
-                for (const FragmentedBlock& block : immovableBlocks)
-                {
-                    StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
-                    if (state.operation != StateExtensive::Operation::Cleanup)
-                    {
-                        VmaBlockVector* vector = m_pBlockVectors[block.data];
-                        VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-
-                        for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
-                        {
-                            if (vector->GetBlock(i) == block.block)
-                            {
-                                VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
-                                if (state.firstFreeBlock != SIZE_MAX)
-                                {
-                                    if (i + 1 < state.firstFreeBlock)
-                                    {
-                                        if (state.firstFreeBlock > 1)
-                                            VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
-                                        else
-                                            --state.firstFreeBlock;
-                                    }
-                                }
-                                swapped = true;
-                                break;
-                            }
-                        }
-                    }
-                }
-                if (swapped)
-                    result = VK_INCOMPLETE;
-                break;
-            }
-        }
-        default:
-        {
-            // Move to the beginning
-            for (const FragmentedBlock& block : immovableBlocks)
-            {
-                VmaBlockVector* vector = m_pBlockVectors[block.data];
-                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-
-                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
-                {
-                    if (vector->GetBlock(i) == block.block)
-                    {
-                        VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
-                        break;
-                    }
-                }
-            }
-            break;
-        }
-        }
-    }
-
-    // Bulk-map destination blocks
-    for (const FragmentedBlock& block : mappedBlocks)
-    {
-        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
-        VMA_ASSERT(res == VK_SUCCESS);
-    }
-    return result;
-}
-
-bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
-{
-    switch (m_Algorithm)
-    {
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
-        return ComputeDefragmentation_Fast(vector);
-    default:
-        VMA_ASSERT(0);
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-        return ComputeDefragmentation_Balanced(vector, index, true);
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
-        return ComputeDefragmentation_Full(vector);
-    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-        return
ComputeDefragmentation_Extensive(vector, index); - } -} - -VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( - VmaAllocHandle handle, VmaBlockMetadata* metadata) -{ - MoveAllocationData moveData; - moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); - moveData.size = moveData.move.srcAllocation->GetSize(); - moveData.alignment = moveData.move.srcAllocation->GetAlignment(); - moveData.type = moveData.move.srcAllocation->GetSuballocationType(); - moveData.flags = 0; - - if (moveData.move.srcAllocation->IsPersistentMap()) - moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; - if (moveData.move.srcAllocation->IsMappingAllowed()) - moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; - - return moveData; -} - -VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) -{ - // Ignore allocation if will exceed max size for copy - if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) - { - if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) - return CounterStatus::Ignore; - else - return CounterStatus::End; - } - return CounterStatus::Pass; -} - -bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) -{ - m_PassStats.bytesMoved += bytes; - // Early return when max found - if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) - { - VMA_ASSERT(m_PassStats.allocationsMoved == m_MaxPassAllocations || - m_PassStats.bytesMoved == m_MaxPassBytes && "Exceeded maximal pass threshold!"); - return true; - } - return false; -} - -bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) -{ - VmaBlockMetadata* metadata = block->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); - if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) - { - VmaAllocationRequest request = {}; - if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) - { - if (metadata->GetAllocationOffset(request.allocHandle) < offset) - { - if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) - { - m_Moves.push_back(moveData.move); - if (IncrementCounters(moveData.size)) - return true; - } - } - } - } - } - return false; -} - -bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) -{ - for (; start < end; ++start) - { - VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); - if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) - { - if (vector.AllocateFromBlock(dstBlock, - data.size, - data.alignment, - data.flags, - this, - data.type, - 0, - 
&data.move.dstTmpAllocation) == VK_SUCCESS)
-            {
-                m_Moves.push_back(data.move);
-                if (IncrementCounters(data.size))
-                    return true;
-                break;
-            }
-        }
-    }
-    return false;
-}
-
-bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
-{
-    // Move only between blocks
-
-    // Go through allocations in last blocks and try to fit them inside first ones
-    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-    {
-        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
-
-        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-            handle != VK_NULL_HANDLE;
-            handle = metadata->GetNextAllocation(handle))
-        {
-            MoveAllocationData moveData = GetMoveData(handle, metadata);
-            // Ignore newly created allocations by defragmentation algorithm
-            if (moveData.move.srcAllocation->GetUserData() == this)
-                continue;
-            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-            {
-            case CounterStatus::Ignore:
-                continue;
-            case CounterStatus::End:
-                return true;
-            default:
-                VMA_ASSERT(0);
-            case CounterStatus::Pass:
-                break;
-            }
-
-            // Check all previous blocks for free space
-            if (AllocInOtherBlock(0, i, moveData, vector))
-                return true;
-        }
-    }
-    return false;
-}
-
-bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
-{
-    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-    // if not possible: realloc within single block to minimize offset (exclude offset == 0),
-    // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
-    VMA_ASSERT(m_AlgorithmState != VMA_NULL);
-
-    StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
-    if (update && vectorState.avgAllocSize == UINT64_MAX)
-        UpdateVectorStatistics(vector, vectorState);
-
-    const size_t startMoveCount = m_Moves.size();
-    VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
-    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-    {
-        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-        VmaBlockMetadata* metadata = block->m_pMetadata;
-        VkDeviceSize prevFreeRegionSize = 0;
-
-        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-            handle != VK_NULL_HANDLE;
-            handle = metadata->GetNextAllocation(handle))
-        {
-            MoveAllocationData moveData = GetMoveData(handle, metadata);
-            // Ignore newly created allocations by defragmentation algorithm
-            if (moveData.move.srcAllocation->GetUserData() == this)
-                continue;
-            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-            {
-            case CounterStatus::Ignore:
-                continue;
-            case CounterStatus::End:
-                return true;
-            default:
-                VMA_ASSERT(0);
-            case CounterStatus::Pass:
-                break;
-            }
-
-            // Check all previous blocks for free space
-            const size_t prevMoveCount = m_Moves.size();
-            if (AllocInOtherBlock(0, i, moveData, vector))
-                return true;
-
-            VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
-            // If no room found then realloc within block for lower offset
-            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-            {
-                // Check if realloc will make sense
-                if (prevFreeRegionSize >= minimalFreeRegion ||
-                    nextFreeRegionSize >= minimalFreeRegion ||
-                    moveData.size <= vectorState.avgFreeSize ||
-                    moveData.size <= vectorState.avgAllocSize)
-                {
-                    VmaAllocationRequest request = {};
-                    if (metadata->CreateAllocationRequest(
-                        moveData.size,
-                        moveData.alignment,
-                        false,
-                        moveData.type,
-                        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-                        &request))
-                    {
-                        if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-                        {
-                            if (vector.CommitAllocationRequest(
-                                request,
-                                block,
-                                moveData.alignment,
-                                moveData.flags,
-                                this,
-                                moveData.type,
-                                &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-                            {
-                                m_Moves.push_back(moveData.move);
-                                if (IncrementCounters(moveData.size))
-                                    return true;
-                            }
-                        }
-                    }
-                }
-            }
-            prevFreeRegionSize = nextFreeRegionSize;
-        }
-    }
-
-    // No moves performed, update statistics to current vector state
-    if (startMoveCount == m_Moves.size() && !update)
-    {
-        vectorState.avgAllocSize = UINT64_MAX;
-        return ComputeDefragmentation_Balanced(vector, index, false);
-    }
-    return false;
-}
-
-bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
-{
-    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-    // if not possible: realloc within single block to minimize offset (exclude offset == 0)
-
-    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-    {
-        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-        VmaBlockMetadata* metadata = block->m_pMetadata;
-
-        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-            handle != VK_NULL_HANDLE;
-            handle = metadata->GetNextAllocation(handle))
-        {
-            MoveAllocationData moveData = GetMoveData(handle, metadata);
-            // Ignore newly created allocations by defragmentation algorithm
-            if (moveData.move.srcAllocation->GetUserData() == this)
-                continue;
-            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-            {
-            case CounterStatus::Ignore:
-                continue;
-            case CounterStatus::End:
-                return true;
-            default:
-                VMA_ASSERT(0);
-            case CounterStatus::Pass:
-                break;
-            }
-
-            // Check all previous blocks for free space
-            const size_t prevMoveCount = m_Moves.size();
-            if (AllocInOtherBlock(0, i, moveData, vector))
-                return true;
-
-            // If no room found then realloc within block for lower offset
-            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-            {
-                VmaAllocationRequest request = {};
-                if (metadata->CreateAllocationRequest(
-                    moveData.size,
-                    moveData.alignment,
-                    false,
-                    moveData.type,
-                    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-                    &request))
-                {
-                    if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-                    {
-                        if (vector.CommitAllocationRequest(
-                            request,
-                            block,
-                            moveData.alignment,
-                            moveData.flags,
-                            this,
-                            moveData.type,
-                            &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-                        {
-                            m_Moves.push_back(moveData.move);
-                            if (IncrementCounters(moveData.size))
-                                return true;
-                        }
-                    }
-                }
-            }
-        }
-    }
-    return false;
-}
-
-bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
-{
-    // First free single block, then populate it to the brim, then free another block, and so on
-
-    // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
-    if (vector.m_BufferImageGranularity == 1)
-        return ComputeDefragmentation_Full(vector);
-
-    VMA_ASSERT(m_AlgorithmState != VMA_NULL);
-
-    StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
-
-    bool texturePresent = false, bufferPresent = false, otherPresent = false;
-    switch (vectorState.operation)
-    {
-    case StateExtensive::Operation::Done: // Vector defragmented
-        return false;
-    case StateExtensive::Operation::FindFreeBlockBuffer:
-    case StateExtensive::Operation::FindFreeBlockTexture:
-    case StateExtensive::Operation::FindFreeBlockAll:
-    {
-        // No more blocks to free, just perform fast realloc and move to cleanup
-        if (vectorState.firstFreeBlock == 0)
-        {
-            vectorState.operation = StateExtensive::Operation::Cleanup;
-            return ComputeDefragmentation_Fast(vector);
-        }
-
-        // No free blocks, have to clear last one
-        size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
-        VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
-
-        const size_t prevMoveCount = m_Moves.size();
-        for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
-            handle != VK_NULL_HANDLE;
-            handle = freeMetadata->GetNextAllocation(handle))
-        {
-            MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
-            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-            {
-            case CounterStatus::Ignore:
-                continue;
-            case CounterStatus::End:
-                return true;
-            default:
-                VMA_ASSERT(0);
-            case CounterStatus::Pass:
-                break;
-            }
-
-            // Check all previous blocks for free space
-            if (AllocInOtherBlock(0, last, moveData, vector))
-            {
-                // Full clear performed already
-                if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
-                    reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last;
-                return true;
-            }
-        }
-
-        if (prevMoveCount == m_Moves.size())
-        {
-            // Cannot perform full clear, have to move data in other blocks around
-            if (last != 0)
-            {
-                for (size_t i = last - 1; i; --i)
-                {
-                    if (ReallocWithinBlock(vector, vector.GetBlock(i)))
-                        return true;
-                }
-            }
-
-            if (prevMoveCount == m_Moves.size())
-            {
-                // No possible reallocs within blocks, try to move them around fast
-                return ComputeDefragmentation_Fast(vector);
-            }
-        }
-        else
-        {
-            switch (vectorState.operation)
-            {
-            case StateExtensive::Operation::FindFreeBlockBuffer:
-                vectorState.operation = StateExtensive::Operation::MoveBuffers;
-                break;
-            default:
-                VMA_ASSERT(0);
-            case StateExtensive::Operation::FindFreeBlockTexture:
-                vectorState.operation = StateExtensive::Operation::MoveTextures;
-                break;
-            case StateExtensive::Operation::FindFreeBlockAll:
-                vectorState.operation = StateExtensive::Operation::MoveAll;
-                break;
-            }
-            vectorState.firstFreeBlock = last;
-            // Nothing done, block found without reallocations, can perform more reallocs in the same pass
-            return ComputeDefragmentation_Extensive(vector, index);
-        }
-        break;
-    }
-    case StateExtensive::Operation::MoveTextures:
-    {
-        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
-            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-        {
-            if (texturePresent)
-            {
-                vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
-                return ComputeDefragmentation_Extensive(vector, index);
-            }
-
-            if (!bufferPresent && !otherPresent)
-            {
-                vectorState.operation = StateExtensive::Operation::Cleanup;
-                break;
-            }
-
-            // No more textures to move, check buffers
-            vectorState.operation = StateExtensive::Operation::MoveBuffers;
-            bufferPresent = false;
-            otherPresent = false;
-        }
-        else
-            break;
-    }
-    case StateExtensive::Operation::MoveBuffers:
-    {
-        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
-            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-        {
-            if (bufferPresent)
-            {
-                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
-                return
ComputeDefragmentation_Extensive(vector, index); - } - - if (!otherPresent) - { - vectorState.operation = StateExtensive::Operation::Cleanup; - break; - } - - // No more buffers to move, check all others - vectorState.operation = StateExtensive::Operation::MoveAll; - otherPresent = false; - } - else - break; - } - case StateExtensive::Operation::MoveAll: - { - if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) - { - if (otherPresent) - { - vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; - return ComputeDefragmentation_Extensive(vector, index); - } - // Everything moved - vectorState.operation = StateExtensive::Operation::Cleanup; - } - break; - } - case StateExtensive::Operation::Cleanup: - // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). - break; - } - - if (vectorState.operation == StateExtensive::Operation::Cleanup) - { - // All other work done, pack data in blocks even tighter if possible - const size_t prevMoveCount = m_Moves.size(); - for (size_t i = 0; i < vector.GetBlockCount(); ++i) - { - if (ReallocWithinBlock(vector, vector.GetBlock(i))) - return true; - } - - if (prevMoveCount == m_Moves.size()) - vectorState.operation = StateExtensive::Operation::Done; - } - return false; -} - -void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) -{ - size_t allocCount = 0; - size_t freeCount = 0; - state.avgFreeSize = 0; - state.avgAllocSize = 0; - - for (size_t i = 0; i < vector.GetBlockCount(); ++i) - { - VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; - - allocCount += metadata->GetAllocationCount(); - freeCount += metadata->GetFreeRegionsCount(); - state.avgFreeSize += metadata->GetSumFreeSize(); - state.avgAllocSize += metadata->GetSize(); - } - - state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; - state.avgFreeSize /= freeCount; -} - -bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent) -{ - const size_t prevMoveCount = m_Moves.size(); - for (size_t i = firstFreeBlock ; i;) - { - VmaDeviceMemoryBlock* block = vector.GetBlock(--i); - VmaBlockMetadata* metadata = block->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Move only single type of resources at once - if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) - { - // Try to fit allocation into free blocks - if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) - return false; - } - - if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) - texturePresent = true; - else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) - bufferPresent = 
true; - else - otherPresent = true; - } - } - return prevMoveCount == m_Moves.size(); -} -#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS - -#ifndef _VMA_POOL_T_FUNCTIONS -VmaPool_T::VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, - VkDeviceSize preferredBlockSize) - : m_BlockVector( - hAllocator, - this, // hParentPool - createInfo.memoryTypeIndex, - createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, - createInfo.minBlockCount, - createInfo.maxBlockCount, - (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), - createInfo.blockSize != 0, // explicitBlockSize - createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm - createInfo.priority, - VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), - createInfo.pMemoryAllocateNext), - m_Id(0), - m_Name(VMA_NULL) {} - -VmaPool_T::~VmaPool_T() -{ - VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); -} - -void VmaPool_T::SetName(const char* pName) -{ - const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); - VmaFreeString(allocs, m_Name); - - if (pName != VMA_NULL) - { - m_Name = VmaCreateStringCopy(allocs, pName); - } - else - { - m_Name = VMA_NULL; - } -} -#endif // _VMA_POOL_T_FUNCTIONS - -#ifndef _VMA_ALLOCATOR_T_FUNCTIONS -VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : - m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), - m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), - m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), - m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), - m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), - m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), - m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), - m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), - m_hDevice(pCreateInfo->device), - m_hInstance(pCreateInfo->instance), - m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), - m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? - *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), - m_AllocationObjectAllocator(&m_AllocationCallbacks), - m_HeapSizeLimitMask(0), - m_DeviceMemoryCount(0), - m_PreferredLargeHeapBlockSize(0), - m_PhysicalDevice(pCreateInfo->physicalDevice), - m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), - m_NextPoolId(0), - m_GlobalMemoryTypeBits(UINT32_MAX) -{ - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - m_UseKhrDedicatedAllocation = false; - m_UseKhrBindMemory2 = false; - } - - if(VMA_DEBUG_DETECT_CORRUPTION) - { - // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
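
Stepping back from the internals deleted above: the defragmentation context is driven through VMA's public 3.x entry points, roughly as sketched below (error handling elided; `allocator` is an assumed, already-created `VmaAllocator`, and this sketch is not code from this patch). The allocator constructor then resumes below with the corruption-detection assertion that the preceding comment announces.

```cpp
void RunFullDefragmentation(VmaAllocator allocator)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

    for (;;)
    {
        VmaDefragmentationPassMoveInfo pass = {};
        // VK_SUCCESS: nothing (left) to move; VK_INCOMPLETE: pMoves is filled.
        if (vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
            break;
        // ... copy each pass.pMoves[i] to its new location on the GPU, or set
        // its operation to VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE / _DESTROY ...
        if (vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
            break;
    }

    VmaDefragmentationStats stats = {};
    vmaEndDefragmentation(allocator, defragCtx, &stats);
}
```
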
- VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); - } - - VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); - - if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) - { -#if !(VMA_DEDICATED_ALLOCATION) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); - } -#endif -#if !(VMA_BIND_MEMORY2) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); - } -#endif - } -#if !(VMA_MEMORY_BUDGET) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); - } -#endif -#if !(VMA_BUFFER_DEVICE_ADDRESS) - if(m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); - } -#endif -#if VMA_VULKAN_VERSION < 1002000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) - { - VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); - } -#endif -#if VMA_VULKAN_VERSION < 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); - } -#endif -#if !(VMA_MEMORY_PRIORITY) - if(m_UseExtMemoryPriority) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); - } -#endif - - memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); - memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); - memset(&m_MemProps, 0, sizeof(m_MemProps)); - - memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); - memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); - -#if VMA_EXTERNAL_MEMORY - memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); -#endif // #if VMA_EXTERNAL_MEMORY - - if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) - { - m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; - m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; - m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; - } - - ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); - - (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); - (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); - - VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); - VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); - VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); - VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); - - m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? 
-    pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-
-    m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
-
-#if VMA_EXTERNAL_MEMORY
-    if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
-    {
-        memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
-            sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
-    }
-#endif // #if VMA_EXTERNAL_MEMORY
-
-    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-    {
-        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-        {
-            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-            if(limit != VK_WHOLE_SIZE)
-            {
-                m_HeapSizeLimitMask |= 1u << heapIndex;
-                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-                {
-                    m_MemProps.memoryHeaps[heapIndex].size = limit;
-                }
-            }
-        }
-    }
-
-    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-    {
-        // Create only supported types
-        if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
-        {
-            const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
-            m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-                this,
-                VK_NULL_HANDLE, // hParentPool
-                memTypeIndex,
-                preferredBlockSize,
-                0,
-                SIZE_MAX,
-                GetBufferImageGranularity(),
-                false, // explicitBlockSize
-                0, // algorithm
-                0.5f, // priority (0.5 is the default per Vulkan spec)
-                GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
-                VMA_NULL); // pMemoryAllocateNext
-            // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
-            // because minBlockCount is 0.
-        }
-    }
-}
-
-VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
-{
-    VkResult res = VK_SUCCESS;
-
-#if VMA_MEMORY_BUDGET
-    if(m_UseExtMemoryBudget)
-    {
-        UpdateVulkanBudget();
-    }
-#endif // #if VMA_MEMORY_BUDGET
-
-    return res;
-}
-
-VmaAllocator_T::~VmaAllocator_T()
-{
-    VMA_ASSERT(m_Pools.IsEmpty());
-
-    for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
-    {
-        vma_delete(this, m_pBlockVectors[memTypeIndex]);
-    }
-}
-
-void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
-{
-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-    ImportVulkanFunctions_Static();
-#endif
-
-    if(pVulkanFunctions != VMA_NULL)
-    {
-        ImportVulkanFunctions_Custom(pVulkanFunctions);
-    }
-
-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-    ImportVulkanFunctions_Dynamic();
-#endif
-
-    ValidateVulkanFunctions();
-}
-
-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
-void VmaAllocator_T::ImportVulkanFunctions_Static()
-{
-    // Vulkan 1.0
-    m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
-    m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
-    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-
m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; - m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; - m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; - m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; - m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; - m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; - m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; - m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; - m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; - - // Vulkan 1.1 -#if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; - m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; - m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; - m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; - m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; - } -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; - m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; - } -#endif -} - -#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) -{ - VMA_ASSERT(pVulkanFunctions != VMA_NULL); - -#define VMA_COPY_IF_NOT_NULL(funcName) \ - if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; - - VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); - VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); - VMA_COPY_IF_NOT_NULL(vkAllocateMemory); - VMA_COPY_IF_NOT_NULL(vkFreeMemory); - VMA_COPY_IF_NOT_NULL(vkMapMemory); - VMA_COPY_IF_NOT_NULL(vkUnmapMemory); - VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); - VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); - VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); - VMA_COPY_IF_NOT_NULL(vkBindImageMemory); - VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkCreateBuffer); - VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); - VMA_COPY_IF_NOT_NULL(vkCreateImage); - VMA_COPY_IF_NOT_NULL(vkDestroyImage); - VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); - VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); -#endif - -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); - VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); -#endif - -#if VMA_MEMORY_BUDGET - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - 
VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); -#endif - -#undef VMA_COPY_IF_NOT_NULL -} - -#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ImportVulkanFunctions_Dynamic() -{ - VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && - "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " - "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " - "Other members can be null."); - -#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ - (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); -#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ - (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); - - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); - VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); - VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); - VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); - VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); - VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); - VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); - VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); - VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); - VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); - VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); - VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); - -#if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); - } -#endif - -#if VMA_DEDICATED_ALLOCATION - if(m_UseKhrDedicatedAllocation) - { - 
VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); - } -#endif - -#if VMA_BIND_MEMORY2 - if(m_UseKhrBindMemory2) - { - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); - } -#endif // #if VMA_BIND_MEMORY2 - -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); - } -#endif // #if VMA_MEMORY_BUDGET - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); - } -#endif - -#undef VMA_FETCH_DEVICE_FUNC -#undef VMA_FETCH_INSTANCE_FUNC -} - -#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ValidateVulkanFunctions() -{ - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) - { - VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); - } -#endif - -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) - { - VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); - } -#endif - -#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); - } -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); - 
VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); - } -#endif -} - -VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) -{ - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; - const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; - return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); -} - -VkResult VmaAllocator_T::AllocateMemoryOfType( - VmaPool pool, - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedPreferred, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, - size_t allocationCount, - VmaAllocation* pAllocations) -{ - VMA_ASSERT(pAllocations != VMA_NULL); - VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); - - VmaAllocationCreateInfo finalCreateInfo = createInfo; - VkResult res = CalcMemTypeParams( - finalCreateInfo, - memTypeIndex, - size, - allocationCount); - if(res != VK_SUCCESS) - return res; - - if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) - { - return AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - } - else - { - const bool canAllocateDedicated = - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && - (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); - - if(canAllocateDedicated) - { - // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. - if(size > blockVector.GetPreferredBlockSize() / 2) - { - dedicatedPreferred = true; - } - // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, - // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above - // 3/4 of the maximum allocation count. 
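
The comment block above encodes two competing rules, condensed here into one illustrative predicate (the function and parameter names are hypothetical, not VMA API); the check below then applies the second rule against the live `VkDeviceMemory` count.

```cpp
#include <vulkan/vulkan.h>

// Sketch of the heuristic above: prefer dedicated memory for large requests,
// but back off once the number of live VkDeviceMemory objects nears the cap.
static bool PreferDedicated(VkDeviceSize size, VkDeviceSize preferredBlockSize,
                            uint32_t liveDeviceMemoryCount, uint32_t maxMemoryAllocationCount)
{
    bool preferred = size > preferredBlockSize / 2;               // rule 1: large request
    if (liveDeviceMemoryCount > maxMemoryAllocationCount * 3 / 4) // rule 2: nearing the cap
        preferred = false;
    return preferred;
}
```
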
- if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) - { - dedicatedPreferred = false; - } - - if(dedicatedPreferred) - { - res = AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) - { - // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. - VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); - return VK_SUCCESS; - } - } - } - - res = blockVector.Allocate( - size, - alignment, - finalCreateInfo, - suballocType, - allocationCount, - pAllocations); - if(res == VK_SUCCESS) - return VK_SUCCESS; - - // Try dedicated memory. - if(canAllocateDedicated && !dedicatedPreferred) - { - res = AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) - { - // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. - VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); - return VK_SUCCESS; - } - } - // Everything failed: Return error code. 
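
The branch structure just traced reduces to a three-way plan; an illustrative restatement (the enum and function are not part of VMA), with the debug log below reporting the terminal failure of whichever plan was chosen.

```cpp
enum class AllocPlan { DedicatedOnly, DedicatedThenBlocks, BlocksThenDedicated };

// The fallback ladder above, reduced to its decision.
static AllocPlan ChoosePlan(bool dedicatedRequired, bool dedicatedPreferred, bool canAllocateDedicated)
{
    if (dedicatedRequired)                          // VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT set
        return AllocPlan::DedicatedOnly;
    if (canAllocateDedicated && dedicatedPreferred) // large request, allocation cap not approached
        return AllocPlan::DedicatedThenBlocks;
    return AllocPlan::BlocksThenDedicated;          // default: suballocate, dedicate only as fallback
}
```
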
- VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); - return res; - } -} - -VkResult VmaAllocator_T::AllocateDedicatedMemory( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - bool isMappingAllowed, - bool canAliasMemory, - void* pUserData, - float priority, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain) -{ - VMA_ASSERT(allocationCount > 0 && pAllocations); - - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; - allocInfo.memoryTypeIndex = memTypeIndex; - allocInfo.allocationSize = size; - allocInfo.pNext = pNextChain; - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; - if(!canAliasMemory) - { - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - if(dedicatedBuffer != VK_NULL_HANDLE) - { - VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); - dedicatedAllocInfo.buffer = dedicatedBuffer; - VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); - } - else if(dedicatedImage != VK_NULL_HANDLE) - { - dedicatedAllocInfo.image = dedicatedImage; - VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); - } - } - } -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - -#if VMA_BUFFER_DEVICE_ADDRESS - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; - if(m_UseKhrBufferDeviceAddress) - { - bool canContainBufferWithDeviceAddress = true; - if(dedicatedBuffer != VK_NULL_HANDLE) - { - canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown - (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; - } - else if(dedicatedImage != VK_NULL_HANDLE) - { - canContainBufferWithDeviceAddress = false; - } - if(canContainBufferWithDeviceAddress) - { - allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; - VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); - } - } -#endif // #if VMA_BUFFER_DEVICE_ADDRESS - -#if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; - if(m_UseExtMemoryPriority) - { - VMA_ASSERT(priority >= 0.f && priority <= 1.f); - priorityInfo.priority = priority; - VmaPnextChainPushFront(&allocInfo, &priorityInfo); - } -#endif // #if VMA_MEMORY_PRIORITY - -#if VMA_EXTERNAL_MEMORY - // Attach VkExportMemoryAllocateInfoKHR if necessary. 
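
For reference, the same dedicated-allocation chain built by hand on the application side; a self-contained sketch in which `device`, `buffer`, `memReq`, and `typeIndex` are placeholders supplied by the caller. The export-memory struct announced by the comment above follows below.

```cpp
#include <vulkan/vulkan.h>

// Hand-rolled equivalent of the chain assembled above (illustrative only).
VkResult AllocateDedicatedForBuffer(VkDevice device, VkBuffer buffer,
                                    const VkMemoryRequirements& memReq,
                                    uint32_t typeIndex, VkDeviceMemory* outMemory)
{
    VkMemoryDedicatedAllocateInfoKHR dedicatedInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    dedicatedInfo.buffer = buffer; // exactly one of buffer/image, never both

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = &dedicatedInfo;
    allocInfo.allocationSize = memReq.size;
    allocInfo.memoryTypeIndex = typeIndex;

    return vkAllocateMemory(device, &allocInfo, nullptr, outMemory);
}
```
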
- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; - exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); - if(exportMemoryAllocInfo.handleTypes != 0) - { - VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); - } -#endif // #if VMA_EXTERNAL_MEMORY - - size_t allocIndex; - VkResult res = VK_SUCCESS; - for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - res = AllocateDedicatedMemoryPage( - pool, - size, - suballocType, - memTypeIndex, - allocInfo, - map, - isUserDataString, - isMappingAllowed, - pUserData, - pAllocations + allocIndex); - if(res != VK_SUCCESS) - { - break; - } - } - - if(res == VK_SUCCESS) - { - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - dedicatedAllocations.Register(pAllocations[allocIndex]); - } - VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); - } - else - { - // Free all already created allocations. - while(allocIndex--) - { - VmaAllocation currAlloc = pAllocations[allocIndex]; - VkDeviceMemory hMemory = currAlloc->GetMemory(); - - /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. - - if(currAlloc->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ - - FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); - m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); - m_AllocationObjectAllocator.Free(currAlloc); - } - - memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); - } - - return res; -} - -VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, - bool map, - bool isUserDataString, - bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation) -{ - VkDeviceMemory hMemory = VK_NULL_HANDLE; - VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); - if(res < 0) - { - VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); - return res; - } - - void* pMappedData = VMA_NULL; - if(map) - { - res = (*m_VulkanFunctions.vkMapMemory)( - m_hDevice, - hMemory, - 0, - VK_WHOLE_SIZE, - 0, - &pMappedData); - if(res < 0) - { - VMA_DEBUG_LOG(" vkMapMemory FAILED"); - FreeVulkanMemory(memTypeIndex, size, hMemory); - return res; - } - } - - *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); - (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); - if (isUserDataString) - (*pAllocation)->SetName(this, (const char*)pUserData); - else - (*pAllocation)->SetUserData(this, pUserData); - m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); - } - - return VK_SUCCESS; -} - -void VmaAllocator_T::GetBufferMemoryRequirements( - VkBuffer hBuffer, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const -{ -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; - memReqInfo.buffer = hBuffer; - - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { 
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; - - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; - VmaPnextChainPushFront(&memReq2, &memDedicatedReq); - - (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); - - memReq = memReq2.memoryRequirements; - requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } - else -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - { - (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); - requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; - } -} - -void VmaAllocator_T::GetImageMemoryRequirements( - VkImage hImage, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const -{ -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; - memReqInfo.image = hImage; - - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; - - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; - VmaPnextChainPushFront(&memReq2, &memDedicatedReq); - - (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); - - memReq = memReq2.memoryRequirements; - requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } - else -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - { - (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); - requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; - } -} - -VkResult VmaAllocator_T::FindMemoryTypeIndex( - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkFlags bufImgUsage, - uint32_t* pMemoryTypeIndex) const -{ - memoryTypeBits &= GetGlobalMemoryTypeBits(); - - if(pAllocationCreateInfo->memoryTypeBits != 0) - { - memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; - } - - VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; - if(!FindMemoryPreferences( - IsIntegratedGpu(), - *pAllocationCreateInfo, - bufImgUsage, - requiredFlags, preferredFlags, notPreferredFlags)) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - *pMemoryTypeIndex = UINT32_MAX; - uint32_t minCost = UINT32_MAX; - for(uint32_t memTypeIndex = 0, memTypeBit = 1; - memTypeIndex < GetMemoryTypeCount(); - ++memTypeIndex, memTypeBit <<= 1) - { - // This memory type is acceptable according to memoryTypeBits bitmask. - if((memTypeBit & memoryTypeBits) != 0) - { - const VkMemoryPropertyFlags currFlags = - m_MemProps.memoryTypes[memTypeIndex].propertyFlags; - // This memory type contains requiredFlags. - if((requiredFlags & ~currFlags) == 0) - { - // Calculate cost as number of bits from preferredFlags not present in this memory type. - uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + - VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); - // Remember memory type with lowest cost. 
- if(currCost < minCost) - { - *pMemoryTypeIndex = memTypeIndex; - if(currCost == 0) - { - return VK_SUCCESS; - } - minCost = currCost; - } - } - } - } - return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; -} - -VkResult VmaAllocator_T::CalcMemTypeParams( - VmaAllocationCreateInfo& inoutCreateInfo, - uint32_t memTypeIndex, - VkDeviceSize size, - size_t allocationCount) -{ - // If memory type is not HOST_VISIBLE, disable MAPPED. - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && - (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; - } - - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) - { - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - VmaBudget heapBudget = {}; - GetHeapBudgets(&heapBudget, heapIndex, 1); - if(heapBudget.usage + size * allocationCount > heapBudget.budget) - { - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - } - return VK_SUCCESS; -} - -VkResult VmaAllocator_T::CalcAllocationParams( - VmaAllocationCreateInfo& inoutCreateInfo, - bool dedicatedRequired, - bool dedicatedPreferred) -{ - VMA_ASSERT((inoutCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && - "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); - VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || - (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && - "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); - if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) - { - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) - { - VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && - "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); - } - } - - // If memory is lazily allocated, it should be always dedicated. 
- if(dedicatedRequired || - inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - } - - if(inoutCreateInfo.pool != VK_NULL_HANDLE) - { - if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) - { - VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); - return VK_ERROR_FEATURE_NOT_PRESENT; - } - inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); - } - - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) - { - VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - } - - // Non-auto USAGE values imply HOST_ACCESS flags. - // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. - // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. - // Otherwise they just protect from assert on mapping. - if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && - inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && - inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) - { - if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; - } - } - - return VK_SUCCESS; -} - -VkResult VmaAllocator_T::AllocateMemory( - const VkMemoryRequirements& vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation* pAllocations) -{ - memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); - - VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); - - if(vkMemReq.size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VmaAllocationCreateInfo createInfoFinal = createInfo; - VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); - if(res != VK_SUCCESS) - return res; - - if(createInfoFinal.pool != VK_NULL_HANDLE) - { - VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; - return AllocateMemoryOfType( - createInfoFinal.pool, - vkMemReq.size, - vkMemReq.alignment, - prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - createInfoFinal, - blockVector.GetMemoryTypeIndex(), - suballocType, - createInfoFinal.pool->m_DedicatedAllocations, - blockVector, - allocationCount, - pAllocations); - } - else - { - // Bit mask of memory Vulkan types acceptable for this allocation. - uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; - uint32_t memTypeIndex = UINT32_MAX; - res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); - // Can't find any single memory type matching requirements. 
- // res is VK_ERROR_FEATURE_NOT_PRESENT.
- if(res != VK_SUCCESS)
- return res;
- do
- {
- VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
- VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
- res = AllocateMemoryOfType(
- VK_NULL_HANDLE,
- vkMemReq.size,
- vkMemReq.alignment,
- requiresDedicatedAllocation || prefersDedicatedAllocation,
- dedicatedBuffer,
- dedicatedImage,
- dedicatedBufferImageUsage,
- createInfoFinal,
- memTypeIndex,
- suballocType,
- m_DedicatedAllocations[memTypeIndex],
- *blockVector,
- allocationCount,
- pAllocations);
- // Allocation succeeded
- if(res == VK_SUCCESS)
- return VK_SUCCESS;
-
- // Remove old memTypeIndex from list of possibilities.
- memoryTypeBits &= ~(1u << memTypeIndex);
- // Find alternative memTypeIndex.
- res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
- } while(res == VK_SUCCESS);
-
- // No other matching memory type index could be found.
- // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
-}
-
-void VmaAllocator_T::FreeMemory(
- size_t allocationCount,
- const VmaAllocation* pAllocations)
-{
- VMA_ASSERT(pAllocations);
-
- for(size_t allocIndex = allocationCount; allocIndex--; )
- {
- VmaAllocation allocation = pAllocations[allocIndex];
-
- if(allocation != VK_NULL_HANDLE)
- {
- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
- {
- FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
- }
-
- allocation->FreeName(this);
-
- switch(allocation->GetType())
- {
- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
- {
- VmaBlockVector* pBlockVector = VMA_NULL;
- VmaPool hPool = allocation->GetParentPool();
- if(hPool != VK_NULL_HANDLE)
- {
- pBlockVector = &hPool->m_BlockVector;
- }
- else
- {
- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
- pBlockVector = m_pBlockVectors[memTypeIndex];
- VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
- }
- pBlockVector->Free(allocation);
- }
- break;
- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
- FreeDedicatedMemory(allocation);
- break;
- default:
- VMA_ASSERT(0);
- }
- }
- }
-}
-
-void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
-{
- // Initialize.
- VmaClearDetailedStatistics(pStats->total);
- for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
- VmaClearDetailedStatistics(pStats->memoryType[i]);
- for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
- VmaClearDetailedStatistics(pStats->memoryHeap[i]);
-
- // Process default pools.
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
- {
- VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
- if (pBlockVector != VMA_NULL)
- pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
- }
-
- // Process custom pools.
- {
- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
- for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
- {
- VmaBlockVector& blockVector = pool->m_BlockVector;
- const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
- blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
- pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
- }
- }
-
- // Process dedicated allocations.
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); - } - - // Sum from memory types to memory heaps. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; - VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); - } - - // Sum from memory heaps to total. - for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) - VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); - - VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || - pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); - VMA_ASSERT(pStats->total.unusedRangeCount == 0 || - pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); -} - -void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) -{ -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - if(m_Budget.m_OperationsSinceBudgetFetch < 30) - { - VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) - { - const uint32_t heapIndex = firstHeap + i; - - outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; - outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; - outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - - if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) - { - outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + - outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; - } - else - { - outBudgets->usage = 0; - } - - // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. - outBudgets->budget = VMA_MIN( - m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); - } - } - else - { - UpdateVulkanBudget(); // Outside of mutex lock - GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion - } - } - else -#endif - { - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) - { - const uint32_t heapIndex = firstHeap + i; - - outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; - outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; - outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - - outBudgets->usage = outBudgets->statistics.blockBytes; - outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
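(For context, the 80% fallback just above is exactly what a caller observes through the public vmaGetHeapBudgets entry point when VK_EXT_memory_budget is unavailable. A minimal caller sketch, assuming an already created VmaAllocator; the PrintHeapBudgets helper name is illustrative only:)

#include <cstdio>
#include "vk_mem_alloc.h"

// Illustrative helper: print current usage versus budget for every memory heap.
void PrintHeapBudgets(VmaAllocator allocator)
{
    const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
    vmaGetMemoryProperties(allocator, &memProps);

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetHeapBudgets(allocator, budgets);

    for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
    {
        // Without VK_EXT_memory_budget, .budget is the 80% heuristic computed above.
        printf("heap %u: %llu used of %llu budget bytes\n", heapIndex,
            (unsigned long long)budgets[heapIndex].usage,
            (unsigned long long)budgets[heapIndex].budget);
    }
}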
- } - } -} - -void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) -{ - pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); - pAllocationInfo->deviceMemory = hAllocation->GetMemory(); - pAllocationInfo->offset = hAllocation->GetOffset(); - pAllocationInfo->size = hAllocation->GetSize(); - pAllocationInfo->pMappedData = hAllocation->GetMappedData(); - pAllocationInfo->pUserData = hAllocation->GetUserData(); - pAllocationInfo->pName = hAllocation->GetName(); -} - -VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) -{ - VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); - - VmaPoolCreateInfo newCreateInfo = *pCreateInfo; - - // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. - if(pCreateInfo->pMemoryAllocateNext) - { - VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); - } - - if(newCreateInfo.maxBlockCount == 0) - { - newCreateInfo.maxBlockCount = SIZE_MAX; - } - if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - // Memory type index out of range or forbidden. - if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || - ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - if(newCreateInfo.minAllocationAlignment > 0) - { - VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); - } - - const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); - - *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); - - VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); - if(res != VK_SUCCESS) - { - vma_delete(this, *pPool); - *pPool = VMA_NULL; - return res; - } - - // Add to m_Pools. - { - VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); - (*pPool)->SetId(m_NextPoolId++); - m_Pools.PushBack(*pPool); - } - - return VK_SUCCESS; -} - -void VmaAllocator_T::DestroyPool(VmaPool pool) -{ - // Remove from m_Pools. - { - VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); - m_Pools.Remove(pool); - } - - vma_delete(this, pool); -} - -void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) -{ - VmaClearStatistics(*pPoolStats); - pool->m_BlockVector.AddStatistics(*pPoolStats); - pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); -} - -void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) -{ - VmaClearDetailedStatistics(*pPoolStats); - pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); - pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); -} - -void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) -{ - m_CurrentFrameIndex.store(frameIndex); - -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - UpdateVulkanBudget(); - } -#endif // #if VMA_MEMORY_BUDGET -} - -VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) -{ - return hPool->m_BlockVector.CheckCorruption(); -} - -VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) -{ - VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; - - // Process default pools. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; - if(pBlockVector != VMA_NULL) - { - VkResult localRes = pBlockVector->CheckCorruption(); - switch(localRes) - { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; - } - } - } - - // Process custom pools. - { - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { - if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) - { - VkResult localRes = pool->m_BlockVector.CheckCorruption(); - switch(localRes) - { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; - } - } - } - } - - return finalRes; -} - -VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) -{ - AtomicTransactionalIncrement deviceMemoryCountIncrement; - const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); -#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT - if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) - { - return VK_ERROR_TOO_MANY_OBJECTS; - } -#endif - - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); - - // HeapSizeLimit is in effect for this heap. - if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) - { - const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; - VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; - for(;;) - { - const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; - if(blockBytesAfterAllocation > heapSize) - { - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) - { - break; - } - } - } - else - { - m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; - } - ++m_Budget.m_BlockCount[heapIndex]; - - // VULKAN CALL vkAllocateMemory. - VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); - - if(res == VK_SUCCESS) - { -#if VMA_MEMORY_BUDGET - ++m_Budget.m_OperationsSinceBudgetFetch; -#endif - - // Informative callback. - if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) - { - (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); - } - - deviceMemoryCountIncrement.Commit(); - } - else - { - --m_Budget.m_BlockCount[heapIndex]; - m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; - } - - return res; -} - -void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) -{ - // Informative callback. - if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) - { - (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); - } - - // VULKAN CALL vkFreeMemory. 
- (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); - - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); - --m_Budget.m_BlockCount[heapIndex]; - m_Budget.m_BlockBytes[heapIndex] -= size; - - --m_DeviceMemoryCount; -} - -VkResult VmaAllocator_T::BindVulkanBuffer( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkBuffer buffer, - const void* pNext) -{ - if(pNext != VMA_NULL) - { -#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && - m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) - { - VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; - bindBufferMemoryInfo.pNext = pNext; - bindBufferMemoryInfo.buffer = buffer; - bindBufferMemoryInfo.memory = memory; - bindBufferMemoryInfo.memoryOffset = memoryOffset; - return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - { - return VK_ERROR_EXTENSION_NOT_PRESENT; - } - } - else - { - return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); - } -} - -VkResult VmaAllocator_T::BindVulkanImage( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkImage image, - const void* pNext) -{ - if(pNext != VMA_NULL) - { -#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && - m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) - { - VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; - bindBufferMemoryInfo.pNext = pNext; - bindBufferMemoryInfo.image = image; - bindBufferMemoryInfo.memory = memory; - bindBufferMemoryInfo.memoryOffset = memoryOffset; - return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); - } - else -#endif // #if VMA_BIND_MEMORY2 - { - return VK_ERROR_EXTENSION_NOT_PRESENT; - } - } - else - { - return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); - } -} - -VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) -{ - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - char *pBytes = VMA_NULL; - VkResult res = pBlock->Map(this, 1, (void**)&pBytes); - if(res == VK_SUCCESS) - { - *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); - hAllocation->BlockAllocMap(); - } - return res; - } - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - return hAllocation->DedicatedAllocMap(this, ppData); - default: - VMA_ASSERT(0); - return VK_ERROR_MEMORY_MAP_FAILED; - } -} - -void VmaAllocator_T::Unmap(VmaAllocation hAllocation) -{ - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - hAllocation->BlockAllocUnmap(); - pBlock->Unmap(this, 1); - } - break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - hAllocation->DedicatedAllocUnmap(this); - break; - default: - VMA_ASSERT(0); - } -} - -VkResult VmaAllocator_T::BindBufferMemory( - VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext) -{ - VkResult res = VK_SUCCESS; - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - res = BindVulkanBuffer(hAllocation->GetMemory(), 
- allocationLocalOffset, hBuffer, pNext);
- break;
- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
- {
- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
- VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
- res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
- break;
- }
- default:
- VMA_ASSERT(0);
- }
- return res;
-}
-
-VkResult VmaAllocator_T::BindImageMemory(
- VmaAllocation hAllocation,
- VkDeviceSize allocationLocalOffset,
- VkImage hImage,
- const void* pNext)
-{
- VkResult res = VK_SUCCESS;
- switch(hAllocation->GetType())
- {
- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
- res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
- break;
- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
- {
- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
- VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
- res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
- break;
- }
- default:
- VMA_ASSERT(0);
- }
- return res;
-}
-
-VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
- VmaAllocation hAllocation,
- VkDeviceSize offset, VkDeviceSize size,
- VMA_CACHE_OPERATION op)
-{
- VkResult res = VK_SUCCESS;
-
- VkMappedMemoryRange memRange = {};
- if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
- {
- switch(op)
- {
- case VMA_CACHE_FLUSH:
- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
- break;
- case VMA_CACHE_INVALIDATE:
- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
- break;
- default:
- VMA_ASSERT(0);
- }
- }
- // else: Just ignore this call.
- return res;
-}
-
-VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
- uint32_t allocationCount,
- const VmaAllocation* allocations,
- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
- VMA_CACHE_OPERATION op)
-{
- typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
- typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
- RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
-
- for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
- {
- const VmaAllocation alloc = allocations[allocIndex];
- const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
- const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
- VkMappedMemoryRange newRange;
- if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
- {
- ranges.push_back(newRange);
- }
- }
-
- VkResult res = VK_SUCCESS;
- if(!ranges.empty())
- {
- switch(op)
- {
- case VMA_CACHE_FLUSH:
- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
- break;
- case VMA_CACHE_INVALIDATE:
- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
- break;
- default:
- VMA_ASSERT(0);
- }
- }
- // else: Just ignore this call.
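(The batching above backs the public vmaFlushAllocations / vmaInvalidateAllocations entry points declared later in this file. A minimal sketch of a caller flushing host writes, assuming valid inputs; FlushWrites is an illustrative name:)

#include "vk_mem_alloc.h"

// Illustrative wrapper: flush CPU writes for a batch of allocations at once.
VkResult FlushWrites(VmaAllocator allocator, uint32_t count, const VmaAllocation* allocs)
{
    // Passing null offset/size arrays flushes each allocation in full
    // (offset 0, VK_WHOLE_SIZE); ranges are emitted only for non-coherent
    // memory types and aligned to nonCoherentAtomSize, as the code above shows.
    return vmaFlushAllocations(allocator, count, allocs, nullptr, nullptr);
}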
- return res; -} - -void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) -{ - VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - VmaPool parentPool = allocation->GetParentPool(); - if(parentPool == VK_NULL_HANDLE) - { - // Default pool - m_DedicatedAllocations[memTypeIndex].Unregister(allocation); - } - else - { - // Custom pool - parentPool->m_DedicatedAllocations.Unregister(allocation); - } - - VkDeviceMemory hMemory = allocation->GetMemory(); - - /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. - - if(allocation->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ - - FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); - - m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); - m_AllocationObjectAllocator.Free(allocation); - - VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); -} - -uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const -{ - VkBufferCreateInfo dummyBufCreateInfo; - VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); - - uint32_t memoryTypeBits = 0; - - // Create buffer. - VkBuffer buf = VK_NULL_HANDLE; - VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( - m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); - if(res == VK_SUCCESS) - { - // Query for supported memory types. - VkMemoryRequirements memReq; - (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); - memoryTypeBits = memReq.memoryTypeBits; - - // Destroy buffer. - (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); - } - - return memoryTypeBits; -} - -uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const -{ - // Make sure memory information is already fetched. - VMA_ASSERT(GetMemoryTypeCount() > 0); - - uint32_t memoryTypeBits = UINT32_MAX; - - if(!m_UseAmdDeviceCoherentMemory) - { - // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) - { - memoryTypeBits &= ~(1u << memTypeIndex); - } - } - } - - return memoryTypeBits; -} - -bool VmaAllocator_T::GetFlushOrInvalidateRange( - VmaAllocation allocation, - VkDeviceSize offset, VkDeviceSize size, - VkMappedMemoryRange& outRange) const -{ - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) - { - const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; - const VkDeviceSize allocationSize = allocation->GetSize(); - VMA_ASSERT(offset <= allocationSize); - - outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; - outRange.pNext = VMA_NULL; - outRange.memory = allocation->GetMemory(); - - switch(allocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) - { - outRange.size = allocationSize - outRange.offset; - } - else - { - VMA_ASSERT(offset + size <= allocationSize); - outRange.size = VMA_MIN( - VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), - allocationSize - outRange.offset); - } - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - // 1. Still within this allocation. - outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) - { - size = allocationSize - offset; - } - else - { - VMA_ASSERT(offset + size <= allocationSize); - } - outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); - - // 2. Adjust to whole block. - const VkDeviceSize allocationOffset = allocation->GetOffset(); - VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); - const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); - outRange.offset += allocationOffset; - outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); - - break; - } - default: - VMA_ASSERT(0); - } - return true; - } - return false; -} - -#if VMA_MEMORY_BUDGET -void VmaAllocator_T::UpdateVulkanBudget() -{ - VMA_ASSERT(m_UseExtMemoryBudget); - - VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; - - VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; - VmaPnextChainPushFront(&memProps, &budgetProps); - - GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); - - { - VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); - - for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) - { - m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; - m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); - - // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. - if(m_Budget.m_VulkanBudget[heapIndex] == 0) - { - m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
- } - else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) - { - m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; - } - if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) - { - m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; - } - } - m_Budget.m_OperationsSinceBudgetFetch = 0; - } -} -#endif // VMA_MEMORY_BUDGET - -void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) -{ - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && - hAllocation->IsMappingAllowed() && - (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) - { - void* pData = VMA_NULL; - VkResult res = Map(hAllocation, &pData); - if(res == VK_SUCCESS) - { - memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); - FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); - Unmap(hAllocation); - } - else - { - VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); - } - } -} - -uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() -{ - uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); - if(memoryTypeBits == UINT32_MAX) - { - memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); - m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); - } - return memoryTypeBits; -} - -#if VMA_STATS_STRING_ENABLED -void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) -{ - json.WriteString("DefaultPools"); - json.BeginObject(); - { - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; - VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; - if (pBlockVector != VMA_NULL) - { - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); - json.BeginObject(); - { - json.WriteString("PreferredBlockSize"); - json.WriteNumber(pBlockVector->GetPreferredBlockSize()); - - json.WriteString("Blocks"); - pBlockVector->PrintDetailedMap(json); - - json.WriteString("DedicatedAllocations"); - dedicatedAllocList.BuildStatsString(json); - } - json.EndObject(); - } - } - } - json.EndObject(); - - json.WriteString("CustomPools"); - json.BeginObject(); - { - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - if (!m_Pools.IsEmpty()) - { - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - bool displayType = true; - size_t index = 0; - for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { - VmaBlockVector& blockVector = pool->m_BlockVector; - if (blockVector.GetMemoryTypeIndex() == memTypeIndex) - { - if (displayType) - { - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); - json.BeginArray(); - displayType = false; - } - - json.BeginObject(); - { - json.WriteString("Name"); - json.BeginString(); - json.ContinueString_Size(index++); - if (pool->GetName()) - { - json.ContinueString(" - "); - json.ContinueString(pool->GetName()); - } - json.EndString(); - - json.WriteString("PreferredBlockSize"); - json.WriteNumber(blockVector.GetPreferredBlockSize()); - - json.WriteString("Blocks"); - blockVector.PrintDetailedMap(json); - - json.WriteString("DedicatedAllocations"); - pool->m_DedicatedAllocations.BuildStatsString(json); - } - json.EndObject(); - } - } - - if (!displayType) 
- json.EndArray(); - } - } - } - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_ALLOCATOR_T_FUNCTIONS - - -#ifndef _VMA_PUBLIC_INTERFACE -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* pCreateInfo, - VmaAllocator* pAllocator) -{ - VMA_ASSERT(pCreateInfo && pAllocator); - VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || - (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); - VMA_DEBUG_LOG("vmaCreateAllocator"); - *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); - VkResult result = (*pAllocator)->Init(pCreateInfo); - if(result < 0) - { - vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); - *pAllocator = VK_NULL_HANDLE; - } - return result; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( - VmaAllocator allocator) -{ - if(allocator != VK_NULL_HANDLE) - { - VMA_DEBUG_LOG("vmaDestroyAllocator"); - VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. - vma_delete(&allocationCallbacks, allocator); - } -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) -{ - VMA_ASSERT(allocator && pAllocatorInfo); - pAllocatorInfo->instance = allocator->m_hInstance; - pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); - pAllocatorInfo->device = allocator->m_hDevice; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( - VmaAllocator allocator, - const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) -{ - VMA_ASSERT(allocator && ppPhysicalDeviceProperties); - *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( - VmaAllocator allocator, - const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) -{ - VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); - *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( - VmaAllocator allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* pFlags) -{ - VMA_ASSERT(allocator && pFlags); - VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); - *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( - VmaAllocator allocator, - uint32_t frameIndex) -{ - VMA_ASSERT(allocator); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->SetCurrentFrameIndex(frameIndex); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( - VmaAllocator allocator, - VmaTotalStatistics* pStats) -{ - VMA_ASSERT(allocator && pStats); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->CalculateStatistics(pStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( - VmaAllocator allocator, - VmaBudget* pBudgets) -{ - VMA_ASSERT(allocator && pBudgets); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); -} - -#if VMA_STATS_STRING_ENABLED - -VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( - VmaAllocator allocator, - char** ppStatsString, - VkBool32 detailedMap) -{ - VMA_ASSERT(allocator && ppStatsString); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VmaStringBuilder sb(allocator->GetAllocationCallbacks()); - { - VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; - allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); - - 
- VmaTotalStatistics stats;
- allocator->CalculateStatistics(&stats);
-
- VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
- json.BeginObject();
- {
- json.WriteString("General");
- json.BeginObject();
- {
- const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
- const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
-
- json.WriteString("API");
- json.WriteString("Vulkan");
-
- json.WriteString("apiVersion");
- json.BeginString();
- json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion));
- json.ContinueString(".");
- json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion));
- json.ContinueString(".");
- json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion));
- json.EndString();
-
- json.WriteString("GPU");
- json.WriteString(deviceProperties.deviceName);
- json.WriteString("deviceType");
- json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
-
- json.WriteString("maxMemoryAllocationCount");
- json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
- json.WriteString("bufferImageGranularity");
- json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
- json.WriteString("nonCoherentAtomSize");
- json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
-
- json.WriteString("memoryHeapCount");
- json.WriteNumber(memoryProperties.memoryHeapCount);
- json.WriteString("memoryTypeCount");
- json.WriteNumber(memoryProperties.memoryTypeCount);
- }
- json.EndObject();
- }
- {
- json.WriteString("Total");
- VmaPrintDetailedStatistics(json, stats.total);
- }
- {
- json.WriteString("MemoryInfo");
- json.BeginObject();
- {
- for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
- {
- json.BeginString("Heap ");
- json.ContinueString(heapIndex);
- json.EndString();
- json.BeginObject();
- {
- const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
- json.WriteString("Flags");
- json.BeginArray(true);
- {
- if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
- json.WriteString("DEVICE_LOCAL");
- #if VMA_VULKAN_VERSION >= 1001000
- if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
- json.WriteString("MULTI_INSTANCE");
- #endif
-
- VkMemoryHeapFlags flags = heapInfo.flags &
- ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
- #if VMA_VULKAN_VERSION >= 1001000
- | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
- #endif
- );
- if (flags != 0)
- json.WriteNumber(flags);
- }
- json.EndArray();
-
- json.WriteString("Size");
- json.WriteNumber(heapInfo.size);
-
- json.WriteString("Budget");
- json.BeginObject();
- {
- json.WriteString("BudgetBytes");
- json.WriteNumber(budgets[heapIndex].budget);
- json.WriteString("UsageBytes");
- json.WriteNumber(budgets[heapIndex].usage);
- }
- json.EndObject();
-
- json.WriteString("Stats");
- VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
-
- json.WriteString("MemoryPools");
- json.BeginObject();
- {
- for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
- {
- if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
- {
- json.BeginString("Type ");
- json.ContinueString(typeIndex);
- json.EndString();
- json.BeginObject();
- {
- json.WriteString("Flags");
- json.BeginArray(true);
- {
- VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
- if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
- json.WriteString("DEVICE_LOCAL");
- if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
- 
json.WriteString("HOST_VISIBLE"); - if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) - json.WriteString("HOST_COHERENT"); - if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) - json.WriteString("HOST_CACHED"); - if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) - json.WriteString("LAZILY_ALLOCATED"); - #if VMA_VULKAN_VERSION >= 1001000 - if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) - json.WriteString("PROTECTED"); - #endif - #if VK_AMD_device_coherent_memory - if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) - json.WriteString("DEVICE_COHERENT_AMD"); - if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) - json.WriteString("DEVICE_UNCACHED_AMD"); - #endif - - flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT - #if VMA_VULKAN_VERSION >= 1001000 - | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT - #endif - #if VK_AMD_device_coherent_memory - | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY - | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY - #endif - | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT - | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT - | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); - if (flags != 0) - json.WriteNumber(flags); - } - json.EndArray(); - - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); - } - json.EndObject(); - } - } - - } - json.EndObject(); - } - json.EndObject(); - } - } - json.EndObject(); - } - - if (detailedMap == VK_TRUE) - allocator->PrintDetailedMap(json); - - json.EndObject(); - } - - *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( - VmaAllocator allocator, - char* pStatsString) -{ - if(pStatsString != VMA_NULL) - { - VMA_ASSERT(allocator); - VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); - } -} - -#endif // VMA_STATS_STRING_ENABLED - -/* -This function is not protected by any mutex because it just reads immutable data. 
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( - VmaAllocator allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pBufferCreateInfo != VMA_NULL); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res; - -#if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceBufferMemoryRequirements) - { - // Can query straight from VkBufferCreateInfo :) - VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; - devBufMemReq.pCreateInfo = pBufferCreateInfo; - - VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; - (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1003000 - { - // Must create a dummy buffer to query :( - VkBuffer hBuffer = VK_NULL_HANDLE; - res = funcs->vkCreateBuffer( - hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); - if(res == VK_SUCCESS) - { - VkMemoryRequirements memReq = {}; - funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); - - funcs->vkDestroyBuffer( - hDev, hBuffer, allocator->GetAllocationCallbacks()); - } - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pImageCreateInfo != VMA_NULL); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res; - -#if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceImageMemoryRequirements) - { - // Can query straight from VkImageCreateInfo :) - VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; - devImgMemReq.pCreateInfo = pImageCreateInfo; - VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && - "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); - - VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; - (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); - - res = 
allocator->FindMemoryTypeIndex( - memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1003000 - { - // Must create a dummy image to query :( - VkImage hImage = VK_NULL_HANDLE; - res = funcs->vkCreateImage( - hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); - if(res == VK_SUCCESS) - { - VkMemoryRequirements memReq = {}; - funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); - - funcs->vkDestroyImage( - hDev, hImage, allocator->GetAllocationCallbacks()); - } - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( - VmaAllocator allocator, - const VmaPoolCreateInfo* pCreateInfo, - VmaPool* pPool) -{ - VMA_ASSERT(allocator && pCreateInfo && pPool); - - VMA_DEBUG_LOG("vmaCreatePool"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->CreatePool(pCreateInfo, pPool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( - VmaAllocator allocator, - VmaPool pool) -{ - VMA_ASSERT(allocator); - - if(pool == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyPool"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->DestroyPool(pool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( - VmaAllocator allocator, - VmaPool pool, - VmaStatistics* pPoolStats) -{ - VMA_ASSERT(allocator && pool && pPoolStats); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->GetPoolStatistics(pool, pPoolStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( - VmaAllocator allocator, - VmaPool pool, - VmaDetailedStatistics* pPoolStats) -{ - VMA_ASSERT(allocator && pool && pPoolStats); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->CalculatePoolStatistics(pool, pPoolStats); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) -{ - VMA_ASSERT(allocator && pool); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VMA_DEBUG_LOG("vmaCheckPoolCorruption"); - - return allocator->CheckPoolCorruption(pool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( - VmaAllocator allocator, - VmaPool pool, - const char** ppName) -{ - VMA_ASSERT(allocator && pool && ppName); - - VMA_DEBUG_LOG("vmaGetPoolName"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *ppName = pool->GetName(); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( - VmaAllocator allocator, - VmaPool pool, - const char* pName) -{ - VMA_ASSERT(allocator && pool); - - VMA_DEBUG_LOG("vmaSetPoolName"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - pool->SetName(pName); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( - VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, 
pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( - VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, - size_t allocationCount, - VmaAllocation* pAllocations, - VmaAllocationInfo* pAllocationInfo) -{ - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); - - VMA_DEBUG_LOG("vmaAllocateMemoryPages"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - allocationCount, - pAllocations); - - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) - { - for(size_t i = 0; i < allocationCount; ++i) - { - allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); - } - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( - VmaAllocator allocator, - VkBuffer buffer, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(buffer, vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation); - - VkResult result = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - buffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( - VmaAllocator allocator, - VkImage image, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetImageMemoryRequirements(image, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - VkResult result = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - image, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( - VmaAllocator allocator, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator); - - if(allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaFreeMemory"); - - 
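(Taken together, the entry points above compose into the usual allocate/bind/use/free round trip. A minimal sketch, assuming a valid allocator, device and buffer-create info, with error handling elided; in practice vmaCreateBuffer wraps the same steps, and CreateAndDestroyBuffer is an illustrative name:)

#include "vk_mem_alloc.h"

// Illustrative round trip over vmaAllocateMemoryForBuffer / vmaFreeMemory.
void CreateAndDestroyBuffer(VmaAllocator allocator, VkDevice device, const VkBufferCreateInfo& bufInfo)
{
    VkBuffer buf = VK_NULL_HANDLE;
    if(vkCreateBuffer(device, &bufInfo, nullptr, &buf) != VK_SUCCESS)
        return;

    // VMA_MEMORY_USAGE_AUTO needs the resource usage flags, which this entry
    // point does not receive, so explicit property flags are requested instead.
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr) == VK_SUCCESS)
    {
        vmaBindBufferMemory(allocator, alloc, buf);
        // ... record and submit work that uses buf, wait for it to finish ...
    }

    vkDestroyBuffer(device, buf, nullptr);
    if(alloc != VK_NULL_HANDLE)
        vmaFreeMemory(allocator, alloc);
}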
VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->FreeMemory( - 1, // allocationCount - &allocation); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( - VmaAllocator allocator, - size_t allocationCount, - const VmaAllocation* pAllocations) -{ - if(allocationCount == 0) - { - return; - } - - VMA_ASSERT(allocator); - - VMA_DEBUG_LOG("vmaFreeMemoryPages"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->FreeMemory(allocationCount, pAllocations); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( - VmaAllocator allocator, - VmaAllocation allocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && allocation && pAllocationInfo); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->GetAllocationInfo(allocation, pAllocationInfo); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( - VmaAllocator allocator, - VmaAllocation allocation, - void* pUserData) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocation->SetUserData(allocator, pUserData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const char* VMA_NULLABLE pName) -{ - allocation->SetName(allocator, pName); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) -{ - VMA_ASSERT(allocator && allocation && pFlags); - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( - VmaAllocator allocator, - VmaAllocation allocation, - void** ppData) -{ - VMA_ASSERT(allocator && allocation && ppData); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->Map(allocation, ppData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( - VmaAllocator allocator, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->Unmap(allocation); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize offset, - VkDeviceSize size) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_LOG("vmaFlushAllocation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize offset, - VkDeviceSize size) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_LOG("vmaInvalidateAllocation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( - VmaAllocator allocator, - uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) -{ - VMA_ASSERT(allocator); - - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocations); - - VMA_DEBUG_LOG("vmaFlushAllocations"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( - VmaAllocator allocator, 
- uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) -{ - VMA_ASSERT(allocator); - - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocations); - - VMA_DEBUG_LOG("vmaInvalidateAllocations"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( - VmaAllocator allocator, - uint32_t memoryTypeBits) -{ - VMA_ASSERT(allocator); - - VMA_DEBUG_LOG("vmaCheckCorruption"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->CheckCorruption(memoryTypeBits); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( - VmaAllocator allocator, - const VmaDefragmentationInfo* pInfo, - VmaDefragmentationContext* pContext) -{ - VMA_ASSERT(allocator && pInfo && pContext); - - VMA_DEBUG_LOG("vmaBeginDefragmentation"); - - if (pInfo->pool != VMA_NULL) - { - // Check if run on supported algorithms - if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); - return VK_SUCCESS; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( - VmaAllocator allocator, - VmaDefragmentationContext context, - VmaDefragmentationStats* pStats) -{ - VMA_ASSERT(allocator && context); - - VMA_DEBUG_LOG("vmaEndDefragmentation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if (pStats) - context->GetStats(*pStats); - vma_delete(allocator, context); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) -{ - VMA_ASSERT(context && pPassInfo); - - VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return context->DefragmentPassBegin(*pPassInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) -{ - VMA_ASSERT(context && pPassInfo); - - VMA_DEBUG_LOG("vmaEndDefragmentationPass"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return context->DefragmentPassEnd(*pPassInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkBuffer buffer) -{ - VMA_ASSERT(allocator && allocation && buffer); - - VMA_DEBUG_LOG("vmaBindBufferMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize allocationLocalOffset, - VkBuffer buffer, - const void* pNext) -{ - VMA_ASSERT(allocator && allocation && buffer); - - VMA_DEBUG_LOG("vmaBindBufferMemory2"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkImage image) -{ - VMA_ASSERT(allocator && allocation && image); - - VMA_DEBUG_LOG("vmaBindImageMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindImageMemory(allocation, 0, image, 
VMA_NULL); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize allocationLocalOffset, - VkImage image, - const void* pNext) -{ - VMA_ASSERT(allocator && allocation && image); - - VMA_DEBUG_LOG("vmaBindImageMemory2"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); - - if(pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pBuffer = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if(res >= 0) - { - // 2. vkGetBufferMemoryRequirements. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - // 3. Allocate memory using allocator. - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - pBufferCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. 
- #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkDeviceSize minAlignment, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); - - if(pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pBuffer = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if(res >= 0) - { - // 2. vkGetBufferMemoryRequirements. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - // 2a. Include minAlignment - vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); - - // 3. Allocate memory using allocator. - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - pBufferCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. 
- #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); - - VMA_DEBUG_LOG("vmaCreateAliasingBuffer"); - - *pBuffer = VK_NULL_HANDLE; - - if (pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if (res >= 0) - { - // 2. Bind buffer with memory. - res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL); - if (res >= 0) - { - return VK_SUCCESS; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( - VmaAllocator allocator, - VkBuffer buffer, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator); - - if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if(buffer != VK_NULL_HANDLE) - { - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); - } - - if(allocation != VK_NULL_HANDLE) - { - allocator->FreeMemory( - 1, // allocationCount - &allocation); - } -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( - VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkImage* pImage, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); - - if(pImageCreateInfo->extent.width == 0 || - pImageCreateInfo->extent.height == 0 || - pImageCreateInfo->extent.depth == 0 || - pImageCreateInfo->mipLevels == 0 || - pImageCreateInfo->arrayLayers == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pImage = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkImage. 
- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( - allocator->m_hDevice, - pImageCreateInfo, - allocator->GetAllocationCallbacks(), - pImage); - if(res >= 0) - { - VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? - VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : - VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; - - // 2. Allocate memory using allocator. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetImageMemoryRequirements(*pImage, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - *pImage, // dedicatedImage - pImageCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - suballocType, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind image with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. - #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - *pImage = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - *pImage = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) -{ - VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); - - *pImage = VK_NULL_HANDLE; - - VMA_DEBUG_LOG("vmaCreateImage"); - - if (pImageCreateInfo->extent.width == 0 || - pImageCreateInfo->extent.height == 0 || - pImageCreateInfo->extent.depth == 0 || - pImageCreateInfo->mipLevels == 0 || - pImageCreateInfo->arrayLayers == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - // 1. Create VkImage. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( - allocator->m_hDevice, - pImageCreateInfo, - allocator->GetAllocationCallbacks(), - pImage); - if (res >= 0) - { - // 2. Bind image with memory. 
- res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL); - if (res >= 0) - { - return VK_SUCCESS; - } - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NULLABLE_NON_DISPATCHABLE image, - VmaAllocation VMA_NULLABLE allocation) -{ - VMA_ASSERT(allocator); - - if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if(image != VK_NULL_HANDLE) - { - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); - } - if(allocation != VK_NULL_HANDLE) - { - allocator->FreeMemory( - 1, // allocationCount - &allocation); - } -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( - const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) -{ - VMA_ASSERT(pCreateInfo && pVirtualBlock); - VMA_ASSERT(pCreateInfo->size > 0); - VMA_DEBUG_LOG("vmaCreateVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); - VkResult res = (*pVirtualBlock)->Init(); - if(res < 0) - { - vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); - *pVirtualBlock = VK_NULL_HANDLE; - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) -{ - if(virtualBlock != VK_NULL_HANDLE) - { - VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. - vma_delete(&allocationCallbacks, virtualBlock); - } -} - -VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); - VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); - VMA_DEBUG_LOG("vmaVirtualAllocate"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) -{ - if(allocation != VK_NULL_HANDLE) - { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaVirtualFree"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->Free(allocation); - } -} - -VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaClearVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->Clear(); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->SetAllocationUserData(allocation, pUserData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatistics* VMA_NOT_NULL pStats) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); - VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->GetStatistics(*pStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaDetailedStatistics* VMA_NOT_NULL pStats) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); - VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->CalculateDetailedStatistics(*pStats); -} - -#if VMA_STATS_STRING_ENABLED - -VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); - VmaStringBuilder sb(allocationCallbacks); - virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); - *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE pStatsString) -{ - if(pStatsString != VMA_NULL) - { - VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE);
-        VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-        VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
-    }
-}
-#endif // VMA_STATS_STRING_ENABLED
-#endif // _VMA_PUBLIC_INTERFACE
-#endif // VMA_IMPLEMENTATION
-
-/**
-\page quick_start Quick start
-
-\section quick_start_project_setup Project setup
-
-Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
-You don't need to build it as a separate library project.
-You can add this file directly to your project and commit it to your code repository next to your other source files.
-
-"Single header" doesn't mean that everything is contained in C/C++ declarations,
-like it tends to be in the case of inline functions or C++ templates.
-It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
-If you don't do it properly, you will get linker errors.
-
-To do it properly:
-
--# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
-   This includes declarations of all members of the library.
--# In exactly one CPP file define the following macro before this include.
-   It also enables internal definitions.
-
-\code
-#define VMA_IMPLEMENTATION
-#include "vk_mem_alloc.h"
-\endcode
-
-It may be a good idea to create a dedicated CPP file just for this purpose.
-
-This library includes header `<vulkan/vulkan.h>`, which in turn
-includes `<windows.h>` on Windows. If you need some specific macros defined
-before including these headers (like `WIN32_LEAN_AND_MEAN` or
-`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
-them before every `#include` of this library.
-
-This library is written in C++, but has a C-compatible interface.
-Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
-implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
-Some features of C++14 are used. STL containers, RTTI, and C++ exceptions are not used.
-
-
-\section quick_start_initialization Initialization
-
-At program startup:
-
--# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
--# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
-   calling vmaCreateAllocator().
-
-Only the members `physicalDevice`, `device`, `instance` are required.
-However, you should inform the library which Vulkan version you use by setting
-VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
-by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
-Otherwise, VMA would use only the features of Vulkan 1.0 core with no extensions.
-
-You may need to configure importing Vulkan functions. There are 3 ways to do this:
-
--# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
-   - You don't need to do anything.
-   - VMA will use these, as the macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
--# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
-   `vkGetDeviceProcAddr` (this is the option presented in the example below):
-   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
-   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
-     VmaVulkanFunctions::vkGetDeviceProcAddr.
-   - The library will fetch pointers to all other functions it needs internally.
--# **If you fetch pointers to all Vulkan functions in a custom way**, e.g.
using some loader like
-   [Volk](https://github.com/zeux/volk):
-   - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
-   - Pass these pointers via the structure #VmaVulkanFunctions.
-
-\code
-VmaVulkanFunctions vulkanFunctions = {};
-vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
-vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
-
-VmaAllocatorCreateInfo allocatorCreateInfo = {};
-allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
-allocatorCreateInfo.physicalDevice = physicalDevice;
-allocatorCreateInfo.device = device;
-allocatorCreateInfo.instance = instance;
-allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
-
-VmaAllocator allocator;
-vmaCreateAllocator(&allocatorCreateInfo, &allocator);
-\endcode
-
-
-\section quick_start_resource_allocation Resource allocation
-
-When you want to create a buffer or image:
-
--# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
--# Fill the VmaAllocationCreateInfo structure.
--# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory
-   already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufferInfo.size = 65536;
-bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-Don't forget to destroy your objects when no longer needed:
-
-\code
-vmaDestroyBuffer(allocator, buffer, allocation);
-vmaDestroyAllocator(allocator);
-\endcode
-
-
-\page choosing_memory_type Choosing memory type
-
-Physical devices in Vulkan support various combinations of memory heaps and
-types. Help with choosing the correct and optimal memory type for your specific
-resource is one of the key features of this library. You can use it by filling
-the appropriate members of the VmaAllocationCreateInfo structure, as described below.
-You can also combine multiple methods.
-
--# If you just want to find a memory type index that meets your requirements, you
-   can use one of the functions: vmaFindMemoryTypeIndexForBufferInfo(),
-   vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
--# If you want to allocate a region of device memory without association with any
-   specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
-   this function is not recommended and usually not needed.
-   The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
-   which may be useful for sparse binding.
--# If you already have a buffer or an image created, want to allocate memory
-   for it, and will bind it yourself, you can use the functions
-   vmaAllocateMemoryForBuffer() and vmaAllocateMemoryForImage() (see the sketch below).
-   For binding you should use the functions vmaBindBufferMemory() and vmaBindImageMemory(),
-   or their extended versions vmaBindBufferMemory2() and vmaBindImageMemory2().
--# **This is the easiest and recommended way to use this library:**
-   If you want to create a buffer or an image, allocate memory for it, and bind
-   them together, all in one call, you can use the functions vmaCreateBuffer() and
-   vmaCreateImage().
-
-When using method 3 or 4, the library internally queries Vulkan for the memory types
-supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
-and uses only one of these types.
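-
-As a concrete illustration of method 3, here is a minimal hedged sketch
-(`allocator`, `device`, and a filled `bufCreateInfo` are assumed to exist already;
-this is one valid way to combine these calls, not the only one):
-
-\code
-// Create the buffer first, without memory.
-VkBuffer buf;
-VkResult res = vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);
-
-// Allocate memory that fits its requirements...
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-VmaAllocation alloc;
-res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
-
-// ...and bind it yourself.
-res = vmaBindBufferMemory(allocator, alloc, buf);
-\endcode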
-
-If no memory type can be found that meets all the requirements, these functions
-return `VK_ERROR_FEATURE_NOT_PRESENT`.
-
-You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
-It means no requirements are specified for the memory type.
-It is valid, although not very useful.
-
-\section choosing_memory_type_usage Usage
-
-The easiest way to specify memory requirements is to fill the member
-VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage.
-It defines high-level, common usage types.
-Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
-
-For example, if you want to create a uniform buffer that will be filled using
-transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
-do it using the following code. The buffer will most likely end up in a memory type with
-`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`, so that it is fast to access by the GPU device.
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufferInfo.size = 65536;
-bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory,
-on systems with a discrete graphics card where these memories are separate, you can use
-#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
-
-When using `VMA_MEMORY_USAGE_AUTO*` values and you want to map the allocated memory,
-you also need to specify one of the host access flags:
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-This will help the library decide on a preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-so you can map it.
-
-For example, a staging buffer that will be filled via a mapped pointer and then
-used as a source of transfer to the buffer described previously can be created like this.
-It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
-but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
-
-\code
-VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-stagingBufferInfo.size = 65536;
-stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo stagingAllocInfo = {};
-stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
-
-VkBuffer stagingBuffer;
-VmaAllocation stagingAllocation;
-vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
-\endcode
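-
-Conversely, a readback buffer that the GPU writes and the CPU later reads can
-prefer host memory and request random host access. A hedged sketch along the same
-lines as the examples above (the exact memory type chosen remains up to the library):
-
-\code
-VkBufferCreateInfo readbackBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-readbackBufferInfo.size = 65536;
-readbackBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo readbackAllocInfo = {};
-readbackAllocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
-readbackAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
-
-VkBuffer readbackBuffer;
-VmaAllocation readbackAllocation;
-vmaCreateBuffer(allocator, &readbackBufferInfo, &readbackAllocInfo, &readbackBuffer, &readbackAllocation, nullptr);
-\endcode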
-
-For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
-
-Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
-about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
-so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
-If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting
-the memory type, as described below.
-
-\note
-Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
-`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
-are still available and work the same way as in previous versions of the library
-for backward compatibility, but they are not recommended.
-
-\section choosing_memory_type_required_preferred_flags Required and preferred flags
-
-You can specify more detailed requirements by filling the members
-VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
-with a combination of bits from the enum `VkMemoryPropertyFlags`. For example,
-if you want to create a buffer that will be persistently mapped on the host (so it
-must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
-use the following code:
-
-\code
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-A memory type is chosen that has all the required flags and as many preferred
-flags set as possible.
-
-The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
-plus some extra "magic" (heuristics).
-
-\section choosing_memory_type_explicit_memory_types Explicit memory types
-
-If you inspected the memory types available on the physical device and you have
-a preference for memory types that you want to use, you can fill the member
-VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
-means that a memory type with that index is allowed to be used for the
-allocation. The special value 0, just like `UINT32_MAX`, means there are no
-restrictions on the memory type index.
-
-Please note that this member is NOT just a memory type index.
-Still, you can use it to choose just one specific memory type.
-For example, if you already determined that your buffer should be created in
-memory type 2, use the following code:
-
-\code
-uint32_t memoryTypeIndex = 2;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-
-\section choosing_memory_type_custom_memory_pools Custom memory pools
-
-If you allocate from a custom memory pool, all the ways of specifying memory
-requirements described above are not applicable and the aforementioned members
-of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
-explicitly when creating the pool and then used to make all the allocations from
-that pool. For further details, see \ref custom_memory_pools.
-
-\section choosing_memory_type_dedicated_allocations Dedicated allocations
-
-Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
-allocated from Vulkan internally. That is the main feature of this whole library.
-You can still request a separate memory block to be created for an allocation,
-just like you would do in a trivial solution without using any allocator.
-In that case, a buffer or image is always bound to that memory at offset 0.
-This is called a "dedicated allocation".
-You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
-as shown in the sketch after this list.
-The library can also internally decide to use a dedicated allocation in some cases, e.g.:
-
-- When the size of the allocation is large.
-- When the [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
-  and it reports that a dedicated allocation is required or recommended for the resource.
-- When allocation of the next big memory block fails due to insufficient device memory,
-  but allocation with the exact requested size succeeds.
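-
-A minimal hedged sketch of requesting a dedicated allocation explicitly, e.g. for a
-large render-target image (the `imgCreateInfo` structure is assumed to be filled
-elsewhere; the flag works the same way for buffers):
-
-\code
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-// Request a separate VkDeviceMemory block just for this image.
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-
-VkImage img;
-VmaAllocation alloc;
-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-\endcode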
-
-
-\page memory_mapping Memory mapping
-
-To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
-to be able to read from it or write to it in CPU code.
-Mapping is possible only for memory allocated from a memory type that has
-the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
-The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
-You can use them directly with memory allocated by this library,
-but it is not recommended because of the following issue:
-Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
-This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
-Because of this, Vulkan Memory Allocator provides the following facilities:
-
-\note If you want to be able to map an allocation, you need to specify one of the flags
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
-when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
-For other usage values they are ignored and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
-but they can still be used for consistency.
-
-\section memory_mapping_mapping_functions Mapping functions
-
-The library provides the following functions for mapping a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
-They are safer and more convenient to use than the standard Vulkan functions.
-You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
-You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
-The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
-For further details, see the description of the vmaMapMemory() function.
-Example:
-
-\code
-// Having these objects initialized:
-struct ConstantBuffer
-{
-    ...
-};
-ConstantBuffer constantBufferData = ...
-
-VmaAllocator allocator = ...
-VkBuffer constantBuffer = ...
-VmaAllocation constantBufferAllocation = ...
-
-// You can map and fill your buffer using the following code:
-
-void* mappedData;
-vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
-memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
-vmaUnmapMemory(allocator, constantBufferAllocation);
-\endcode
-
-When mapping, you may see a warning from the Vulkan validation layer similar to this one:
-
-Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
-
-It happens because the library maps the entire `VkDeviceMemory` block, where different
-types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
-You can safely ignore it if you are sure you access only memory of the intended
-object that you wanted to map.
-
-
-\section memory_mapping_persistently_mapped_memory Persistently mapped memory
-
-Keeping your memory persistently mapped is generally OK in Vulkan.
-You don't need to unmap it before using its data on the GPU.
-The library provides a special feature designed for that:
-Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
-VmaAllocationCreateInfo::flags stay mapped all the time,
-so you can just access the CPU pointer to it at any time
-without needing to call any "map" or "unmap" function.
-Example:
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = sizeof(ConstantBuffer);
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-// Buffer is already mapped. You can access its memory.
-memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-\endcode
-
-\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
-in a mappable memory type.
-For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
-For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
-
-\section memory_mapping_cache_control Cache flush and invalidate
-
-Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
-but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
-you need to manually **invalidate** the cache before reading from a mapped pointer
-and **flush** the cache after writing to a mapped pointer.
-Map/unmap operations don't do that automatically.
-Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
-`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
-functions that refer to a given allocation object: vmaFlushAllocation(),
-vmaInvalidateAllocation(),
-or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
-
-Regions of memory specified for flush/invalidate must be aligned to
-`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
-In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
-within blocks are aligned to this value, so their offsets are always a multiple of
-`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
-
-Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
-currently provide the `HOST_COHERENT` flag on all memory types that are
-`HOST_VISIBLE`, so on PC you may not need to bother.
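-
-A hedged sketch of the typical write-then-flush pattern (assuming `alloc` lives in a
-memory type that is `HOST_VISIBLE` but possibly not `HOST_COHERENT`, and `srcData` /
-`dataSize` are placeholders for your data):
-
-\code
-void* mappedData;
-vmaMapMemory(allocator, alloc, &mappedData);
-memcpy(mappedData, srcData, dataSize);
-// The library rounds the range to nonCoherentAtomSize internally and skips the
-// Vulkan call for HOST_COHERENT memory, so it is safe to call unconditionally.
-vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
-vmaUnmapMemory(allocator, alloc);
-\endcode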
-
-
-\page staying_within_budget Staying within budget
-
-When developing a graphics-intensive game or program, it is important to avoid allocating
-more GPU memory than is physically available. When the memory is over-committed,
-various bad things can happen, depending on the specific GPU, graphics driver, and
-operating system:
-
-- It may just work without any problems.
-- The application may slow down because some memory blocks are moved to system RAM
-  and the GPU has to access them through the PCI Express bus.
-- A new allocation may take a very long time to complete, even a few seconds, and possibly
-  freeze the entire system.
-- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
-  returned somewhere later.
-
-\section staying_within_budget_querying_for_budget Querying for budget
-
-To query for current memory usage and available budget, use the function vmaGetHeapBudgets().
-The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
-
-Please note that this function returns different information and works faster than
-vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
-allocation, while vmaCalculateStatistics() is intended to be used rarely,
-only to obtain statistical information, e.g. for debugging purposes.
-
-It is recommended to use the VK_EXT_memory_budget device extension to obtain information
-about the budget from the Vulkan device. VMA is able to use this extension automatically.
-When not enabled, the allocator behaves the same way, but then it estimates current usage
-and available budget based on its internal information and Vulkan memory heap sizes,
-which may be less precise. In order to use this extension:
-
-1. Make sure the extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
-   (required by it) are available and enable them. Please note that the first is a device
-   extension and the second is an instance extension!
-2. Use the flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
-3. Make sure to call vmaSetCurrentFrameIndex() every frame. The budget is queried from
-   Vulkan inside of it to avoid the overhead of querying it with every allocation.
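-
-A hedged sketch of a per-frame budget check (the printing is just for illustration;
-`allocator` is assumed to be created already):
-
-\code
-const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
-vmaGetMemoryProperties(allocator, &memProps);
-
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
-vmaGetHeapBudgets(allocator, budgets);
-
-for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
-{
-    printf("Heap %u: used %llu B out of budget %llu B\n",
-        heapIndex,
-        (unsigned long long)budgets[heapIndex].usage,
-        (unsigned long long)budgets[heapIndex].budget);
-}
-\endcode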
-
-\section staying_within_budget_controlling_memory_usage Controlling memory usage
-
-There are many ways in which you can try to stay within the budget.
-
-First, when making a new allocation requires allocating a new memory block, the library
-tries not to exceed the budget automatically. If a block with the default recommended size
-(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
-dedicated memory for just this resource.
-
-If the size of the requested resource plus current memory usage is more than the
-budget, by default the library still tries to create it, leaving it to the Vulkan
-implementation whether the allocation succeeds or fails. You can change this behavior
-by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
-not made if it would exceed the budget or if the budget is already exceeded.
-VMA then tries to make the allocation from the next eligible Vulkan memory type.
-If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
-when creating resources that are not essential for the application (e.g. the texture
-of a specific object) and not to pass it when creating critically important resources
-(e.g. render targets).
-
-On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior
-that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
-whether it should fail with an error code or still allow the allocation.
-Usage of this extension involves only passing an extra structure on Vulkan device creation,
-so it is out of the scope of this library.
-
-Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
-a new allocation is created only when it fits inside one of the existing memory blocks.
-If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-This also ensures that the function call is very fast because it never goes to Vulkan
-to obtain a new block.
-
-\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
-set to more than 0 will currently try to allocate memory blocks without checking whether they
-fit within the budget.
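-
-A hedged sketch of the pattern described above, for a non-essential texture
-(`imgCreateInfo` is an assumed, already-filled `VkImageCreateInfo`):
-
-\code
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-// Let this optional texture fail early rather than push the heap over budget.
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
-
-VkImage img;
-VmaAllocation alloc;
-VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-if(res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
-{
-    // Skip the optional resource, or free something else and retry.
-}
-\endcode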
-
-
-\page resource_aliasing Resource aliasing (overlap)
-
-New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
-management, give an opportunity to alias (overlap) multiple resources in the
-same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
-It can be useful to save video memory, but it must be used with caution.
-
-For example, if you know the flow of your whole render frame in advance, you
-are going to use some intermediate textures or buffers only during a small range of render passes,
-and you know these ranges don't overlap in time, you can bind these resources to
-the same place in memory, even if they have completely different parameters (width, height, format etc.).
-
-![Resource aliasing (overlap)](../gfx/Aliasing.png)
-
-Such a scenario is possible using VMA, but you need to create your images manually.
-Then you need to calculate the parameters of the allocation to be made using the formula:
-
-- allocation size = max(size of each image)
-- allocation alignment = max(alignment of each image)
-- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
-
-The following example shows two different images bound to the same place in memory,
-allocated to fit the largest of them.
-
-\code
-// A 512x512 texture to be sampled.
-VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-img1CreateInfo.extent.width = 512;
-img1CreateInfo.extent.height = 512;
-img1CreateInfo.extent.depth = 1;
-img1CreateInfo.mipLevels = 10;
-img1CreateInfo.arrayLayers = 1;
-img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
-img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
-img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-// A full screen texture to be used as color attachment.
-VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-img2CreateInfo.extent.width = 1920;
-img2CreateInfo.extent.height = 1080;
-img2CreateInfo.extent.depth = 1;
-img2CreateInfo.mipLevels = 1;
-img2CreateInfo.arrayLayers = 1;
-img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-VkImage img1;
-res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
-VkImage img2;
-res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
-
-VkMemoryRequirements img1MemReq;
-vkGetImageMemoryRequirements(device, img1, &img1MemReq);
-VkMemoryRequirements img2MemReq;
-vkGetImageMemoryRequirements(device, img2, &img2MemReq);
-
-VkMemoryRequirements finalMemReq = {};
-finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
-finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
-finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
-// Validate if(finalMemReq.memoryTypeBits != 0)
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-VmaAllocation alloc;
-res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
-
-res = vmaBindImageMemory(allocator, alloc, img1);
-res = vmaBindImageMemory(allocator, alloc, img2);
-
-// You can use img1, img2 here, but not at the same time!
-
-vmaFreeMemory(allocator, alloc);
-vkDestroyImage(device, img2, nullptr);
-vkDestroyImage(device, img1, nullptr);
-\endcode
-
-Remember that using resources that alias in memory requires proper synchronization.
-You need to issue a memory barrier to make sure commands that use `img1` and `img2`
-don't overlap on the GPU timeline.
-You also need to treat a resource after aliasing as uninitialized - containing garbage data.
-For example, if you use `img1` and then want to use `img2`, you need to issue
-an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
-
-Additional considerations:
-
-- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
-  See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
-- You can create a more complex layout where different images and buffers are bound
-  at different offsets inside one large allocation. For example, one can imagine
-  a big texture used in some render passes, aliasing with a set of many small buffers
-  used in some further passes. To bind a resource at a non-zero offset in an allocation,
-  use vmaBindBufferMemory2() / vmaBindImageMemory2(), as shown in the sketch below.
-- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
-  returned in the memory requirements of each resource to make sure the bits overlap.
-  Some GPUs may expose multiple memory types suitable e.g. only for buffers or
-  images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
-  resources may be disjoint. Aliasing them is not possible in that case.
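-
-A hedged sketch of binding at a non-zero offset (`bigAlloc` and `smallBuffer` are
-assumed to exist; the chosen offset must respect the buffer's alignment requirement
-and the whole buffer must fit inside the allocation):
-
-\code
-// Bind smallBuffer 64 KiB past the beginning of bigAlloc.
-// The offset is relative to the beginning of the allocation.
-const VkDeviceSize offsetInAllocation = 65536;
-VkResult res = vmaBindBufferMemory2(allocator, bigAlloc, offsetInAllocation, smallBuffer, nullptr);
-\endcode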
-
-
-\page custom_memory_pools Custom memory pools
-
-A memory pool contains a number of `VkDeviceMemory` blocks.
-The library automatically creates and manages a default pool for each memory type available on the device.
-A default memory pool automatically grows in size.
-The size of allocated blocks is also variable and managed automatically.
-
-You can create a custom pool and allocate memory out of it.
-It can be useful if you want to:
-
-- Keep a certain kind of allocations separate from others.
-- Enforce a particular, fixed size of Vulkan memory blocks.
-- Limit the maximum amount of Vulkan memory allocated for that pool.
-- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
-- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
-  #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
-- Perform defragmentation on a specific subset of your allocations.
-
-To use custom memory pools:
-
--# Fill the VmaPoolCreateInfo structure.
--# Call vmaCreatePool() to obtain a #VmaPool handle.
--# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
-   You don't need to specify any other parameters of this structure, like `usage`.
-
-Example:
-
-\code
-// Find memoryTypeIndex for the pool.
-VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
-sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo sampleAllocCreateInfo = {};
-sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-uint32_t memTypeIndex;
-VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
-    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
-// Check res...
-
-// Create a pool that can have at most 2 blocks, 128 MiB each.
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-poolCreateInfo.blockSize = 128ull * 1024 * 1024;
-poolCreateInfo.maxBlockCount = 2;
-
-VmaPool pool;
-res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
-// Check res...
-
-// Allocate a buffer out of it.
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 1024;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.pool = pool;
-
-VkBuffer buf;
-VmaAllocation alloc;
-res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-// Check res...
-\endcode
-
-You have to free all allocations made from this pool before destroying it.
-
-\code
-vmaDestroyBuffer(allocator, buf, alloc);
-vmaDestroyPool(allocator, pool);
-\endcode
-
-New versions of this library support creating dedicated allocations in custom pools.
-It is supported only when VmaPoolCreateInfo::blockSize = 0.
-To use this feature, set VmaAllocationCreateInfo::pool to your custom pool and
-VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-
-\note Excessive use of custom pools is a common mistake when using this library.
-Custom pools may be useful for special purposes - when you want to
-keep a certain type of resources separate, e.g. to reserve a minimum amount of memory
-for them or limit the maximum amount of memory they can occupy. For most
-resources this is not needed and so it is not recommended to create #VmaPool
-objects and allocations out of them. Allocating from the default pool is sufficient.
-
-
-\section custom_memory_pools_MemTypeIndex Choosing memory type index
-
-When creating a pool, you must explicitly specify the memory type index.
-To find the one suitable for your buffers or images, you can use the helper functions
-vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
-You need to provide structures with example parameters of the buffers or images
-that you are going to create in that pool.
-
-\code
-VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-exampleBufCreateInfo.size = 1024; // Doesn't matter
-exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-uint32_t memTypeIndex;
-vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
-
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-// ...
-\endcode
-
-When creating buffers/images allocated in that pool, provide the following parameters:
-
-- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
-  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
-  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
-  or the other way around.
-- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
-  Other members are ignored anyway.
-
-\section linear_algorithm Linear allocation algorithm
-
-Each Vulkan memory block managed by this library has accompanying metadata that
-keeps track of used and unused regions. By default, the metadata structure and
-algorithm try to find the best place for new allocations among free regions to
-optimize memory usage. This way you can allocate and free objects in any order.
-
-![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
-
-Sometimes there is a need for a simpler, linear allocation algorithm. You can
-create a custom pool that uses this algorithm by adding the flag
-#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating the
-#VmaPool object. An alternative metadata management is then used. It always
-creates new allocations after the last one and doesn't reuse free regions left after
-allocations freed in the middle. This results in better allocation performance and
-less memory consumed by metadata.
-
-![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
-
-With this one flag, you can create a custom pool that can be used in many ways:
-free-at-once, stack, double stack, and ring buffer. See below for details.
-You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
-
-\subsection linear_algorithm_free_at_once Free-at-once
-
-In a pool that uses the linear algorithm, you still need to free all the allocations
-individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
-them in any order. New allocations are always made after the last one - free space
-in the middle is not reused. However, when you release all the allocations and
-the pool becomes empty, allocation starts from the beginning again. This way you
-can use the linear algorithm to speed up the creation of allocations that you are going
-to release all at once.
-
-![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
-
-This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
-value that allows multiple memory blocks.
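-
-For reference, a minimal sketch of creating such a linear pool (assuming `memTypeIndex`
-was found as shown in the previous section):
-
-\code
-VmaPoolCreateInfo poolCreateInfo = {};
-poolCreateInfo.memoryTypeIndex = memTypeIndex;
-poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
-
-VmaPool pool;
-VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
-// Check res...
-\endcode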
-
-\subsection linear_algorithm_stack Stack
-
-When you free an allocation that was created last, its space can be reused.
-Thanks to this, if you always release allocations in the order opposite to their
-creation (LIFO - Last In First Out), you can achieve the behavior of a stack.
-
-![Stack](../gfx/Linear_allocator_4_stack.png)
-
-This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
-value that allows multiple memory blocks.
-
-\subsection linear_algorithm_double_stack Double stack
-
-The space reserved by a custom pool with the linear algorithm may be used by two
-stacks:
-
-- First, the default one, growing up from offset 0.
-- Second, an "upper" one, growing down from the end towards lower offsets.
-
-To make an allocation from the upper stack, add the flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
-to VmaAllocationCreateInfo::flags.
-
-![Double stack](../gfx/Linear_allocator_7_double_stack.png)
-
-Double stack is available only in pools with one memory block -
-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
-
-When the two stacks' ends meet so there is not enough space between them for a
-new allocation, such an allocation fails with the usual
-`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-
-\subsection linear_algorithm_ring_buffer Ring buffer
-
-When you free some allocations from the beginning and there is not enough free space
-for a new one at the end of the pool, the allocator's "cursor" wraps around to the
-beginning and starts allocating there. Thanks to this, if you always release
-allocations in the same order as you created them (FIFO - First In First Out),
-you can achieve the behavior of a ring buffer / queue.
-
-![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
-
-Ring buffer is available only in pools with one memory block -
-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
-
-\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
-
-
-\page defragmentation Defragmentation
-
-Interleaved allocations and deallocations of many objects of varying size can
-cause fragmentation over time, which can lead to a situation where the library is unable
-to find a continuous range of free memory for a new allocation even though there is
-enough total free space, just scattered across many small free ranges between existing
-allocations.
-
-To mitigate this problem, you can use the defragmentation feature.
-It doesn't happen automatically though and needs your cooperation,
-because VMA is a low-level library that only allocates memory.
-It cannot recreate buffers and images in a new place as it doesn't remember the contents of the `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
-It cannot copy their contents as it doesn't record any commands to a command buffer.
-
-Example:
-
-\code
-VmaDefragmentationInfo defragInfo = {};
-defragInfo.pool = myPool;
-defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
-
-VmaDefragmentationContext defragCtx;
-VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
-// Check res...
-
-for(;;)
-{
-    VmaDefragmentationPassMoveInfo pass;
-    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
-    if(res == VK_SUCCESS)
-        break;
-    else if(res != VK_INCOMPLETE)
-        // Handle error...
-
-    for(uint32_t i = 0; i < pass.moveCount; ++i)
-    {
-        // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
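-        // (Assumption of this example: pUserData was set at allocation time to point
-        // to your engine's own metadata object, here a hypothetical MyEngineResourceData.)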
-        VmaAllocationInfo allocInfo;
-        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
-        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
-
-        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
-        VkImageCreateInfo imgCreateInfo = ...
-        VkImage newImg;
-        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
-        // Check res...
-        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
-        // Check res...
-
-        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
-        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
-    }
-
-    // Make sure the copy commands finished executing.
-    vkWaitForFences(...);
-
-    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
-    for(uint32_t i = 0; i < pass.moveCount; ++i)
-    {
-        // ...
-        vkDestroyImage(device, resData->img, nullptr);
-    }
-
-    // Update appropriate descriptors to point to the new places...
-
-    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
-    if(res == VK_SUCCESS)
-        break;
-    else if(res != VK_INCOMPLETE)
-        // Handle error...
-}
-
-vmaEndDefragmentation(allocator, defragCtx, nullptr);
-\endcode
-
-Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
-create/destroy an allocation and a buffer/image at once, these are just a shortcut for
-creating the resource, allocating memory, and binding them together.
-Defragmentation works on memory allocations only. You must handle the rest manually.
-Defragmentation is an iterative process that should repeat "passes" as long as related functions
-return `VK_INCOMPLETE`, not `VK_SUCCESS`.
-In each pass:
-
-1. vmaBeginDefragmentationPass() function call:
-   - Calculates and returns the list of allocations to be moved in this pass.
-     Note this can be a time-consuming process.
-   - Reserves destination memory for them by creating temporary destination allocations
-     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
-2. Inside the pass, **you should**:
-   - Inspect the returned list of allocations to be moved.
-   - Create new buffers/images and bind them at the returned destination temporary allocations.
-   - Copy data from source to destination resources if necessary.
-   - Destroy the source buffers/images, but NOT their allocations.
-3. vmaEndDefragmentationPass() function call:
-   - Frees the source memory reserved for the allocations that are moved.
-   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
-   - Frees `VkDeviceMemory` blocks that became empty.
-
-Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
-The defragmentation algorithm tries to move all suitable allocations.
-You can, however, refuse to move some of them inside a defragmentation pass, by setting
-`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
-If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
-
-Inside a pass, for each allocation that should be moved:
-
-- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
-  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
-- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
-  filled, and used temporarily in each rendering frame, you can just recreate this image
-  without copying its data.
-- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
-  using `memcpy()`.
-- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-  This will cancel the move.
-  - vmaEndDefragmentationPass() will then free the destination memory - not the source memory of the allocation, leaving it unchanged.
-- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for a long time),
-  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
-  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
-
-You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
-(like in the example above) or all the default pools by setting this member to null.
-
-Defragmentation is always performed in each pool separately.
-Allocations are never moved between different Vulkan memory types.
-The size of the destination memory reserved for a moved allocation is the same as the original one.
-The alignment of an allocation as determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
-Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
-
-You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
-in each pass, e.g. to call it in sync with render frames and avoid long hitches.
-See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
-
-It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
-usage, possibly from multiple threads, with the exception that allocations
-returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
-
-Mapping is preserved on allocations that are moved during defragmentation.
-Whether mapped through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
-are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
-using VmaAllocationInfo::pMappedData.
-
-\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
-
-
-\page statistics Statistics
-
-This library contains several functions that return information about its internal state,
-especially the amount of memory allocated from Vulkan.
-
-\section statistics_numeric_statistics Numeric statistics
-
-If you need to obtain basic statistics about memory usage per heap, together with the current budget,
-you can call the function vmaGetHeapBudgets() and inspect structure #VmaBudget.
-This is useful to keep track of memory usage and stay within budget
-(see also \ref staying_within_budget).
-Example:
-
-\code
-uint32_t heapIndex = ...
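-// (Which heapIndex to inspect is up to you, e.g. the index of the heap that your DEVICE_LOCAL memory types belong to.)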
-
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-vmaGetHeapBudgets(allocator, budgets);
-
-printf("My heap currently has %u allocations taking %llu B,\n",
-    budgets[heapIndex].statistics.allocationCount,
-    budgets[heapIndex].statistics.allocationBytes);
-printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
-    budgets[heapIndex].statistics.blockCount,
-    budgets[heapIndex].statistics.blockBytes);
-printf("Vulkan reports total usage %llu B with budget %llu B.\n",
-    budgets[heapIndex].usage,
-    budgets[heapIndex].budget);
-\endcode
-
-You can query for more detailed statistics per memory heap, type, and totals,
-including minimum and maximum allocation size and unused range size,
-by calling the function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
-This function is slower though, as it has to traverse all the internal data structures,
-so it should be used only for debugging purposes.
-
-You can query for statistics of a custom pool using the functions vmaGetPoolStatistics()
-or vmaCalculatePoolStatistics().
-
-You can query for information about a specific allocation using the function vmaGetAllocationInfo().
-It fills the structure #VmaAllocationInfo.
-
-\section statistics_json_dump JSON dump
-
-You can dump the internal state of the allocator to a string in JSON format using the function vmaBuildStatsString().
-The result is guaranteed to be correct JSON.
-It uses ANSI encoding.
-Any strings provided by the user (see [Allocation names](@ref allocation_names))
-are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
-this JSON string can be treated as using this encoding.
-It must be freed using the function vmaFreeStatsString().
-
-The format of this JSON string is not part of the official documentation of the library,
-but it will not change in a backward-incompatible way without incrementing the library's major version number
-and an appropriate mention in the changelog.
-
-The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
-It can also contain a detailed map of allocated memory blocks and their regions -
-free and occupied by allocations.
-This allows you, for example, to visualize the memory or assess fragmentation.
-
-
-\page allocation_annotation Allocation names and user data
-
-\section allocation_user_data Allocation user data
-
-You can annotate allocations with your own information, e.g. for debugging purposes.
-To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
-an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
-some handle, index, key, ordinal number or any other value that would associate
-the allocation with your custom metadata.
-It is useful for identifying the appropriate data structures in your engine given a #VmaAllocation,
-e.g. when doing \ref defragmentation.
-
-\code
-VkBufferCreateInfo bufCreateInfo = ...
-
-MyBufferMetadata* pMetadata = CreateBufferMetadata();
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.pUserData = pMetadata;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
-\endcode
-
-The pointer may be later retrieved as VmaAllocationInfo::pUserData:
-
-\code
-VmaAllocationInfo allocInfo;
-vmaGetAllocationInfo(allocator, allocation, &allocInfo);
-MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
-\endcode
-
-It can also be changed using the function vmaSetAllocationUserData().
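-
-For example (a minimal sketch; `pNewMetadata` stands for a hypothetical pointer to your own, updated metadata object):
-
-\code
-// Replace the user data associated with an existing allocation.
-vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
-\endcode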
-
-Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
-vmaBuildStatsString() in hexadecimal form.
-
-\section allocation_names Allocation names
-
-An allocation can also carry a null-terminated string, giving a name to the allocation.
-To set it, call vmaSetAllocationName().
-The library creates an internal copy of the string, so the pointer you pass doesn't need
-to be valid for the whole lifetime of the allocation. You can free it after the call.
-
-\code
-std::string imageName = "Texture: ";
-imageName += fileName;
-vmaSetAllocationName(allocator, allocation, imageName.c_str());
-\endcode
-
-The string can be later retrieved by inspecting VmaAllocationInfo::pName.
-It is also printed in the JSON report created by vmaBuildStatsString().
-
-\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
-You must do that manually using an extension like VK_EXT_debug_utils, which is independent of this library.
-
-
-\page virtual_allocator Virtual allocator
-
-As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of a "virtual allocator".
-It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
-You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
-A common use case is sub-allocation of pieces of one large GPU buffer.
-
-\section virtual_allocator_creating_virtual_block Creating virtual block
-
-To use this functionality, there is no main "allocator" object.
-You don't need to have a #VmaAllocator object created.
-All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
-
--# Fill in a #VmaVirtualBlockCreateInfo structure.
--# Call vmaCreateVirtualBlock(). Get the new #VmaVirtualBlock object.
-
-Example:
-
-\code
-VmaVirtualBlockCreateInfo blockCreateInfo = {};
-blockCreateInfo.size = 1048576; // 1 MB
-
-VmaVirtualBlock block;
-VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
-\endcode
-
-\section virtual_allocator_making_virtual_allocations Making virtual allocations
-
-A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
-using the same code as the main Vulkan memory allocator.
-Similarly to #VmaAllocation for standard GPU allocations, there is the #VmaVirtualAllocation type
-that represents an opaque handle to an allocation within the virtual block.
-
-In order to make such an allocation:
-
--# Fill in a #VmaVirtualAllocationCreateInfo structure.
--# Call vmaVirtualAllocate(). Get the new #VmaVirtualAllocation object that represents the allocation.
-   You can also receive the `VkDeviceSize offset` that was assigned to the allocation.
-
-Example:
-
-\code
-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.size = 4096; // 4 KB
-
-VmaVirtualAllocation alloc;
-VkDeviceSize offset;
-res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
-if(res == VK_SUCCESS)
-{
-    // Use the 4 KB of your memory starting at offset.
-}
-else
-{
-    // Allocation failed - no space for it could be found. Handle this error!
-}
-\endcode
-
-\section virtual_allocator_deallocation Deallocation
-
-When no longer needed, an allocation can be freed by calling vmaVirtualFree().
-You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
-called for the same #VmaVirtualBlock.
-
-When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
-All allocations must be freed before the block is destroyed, which is checked internally by an assert.
-However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
-a feature not available in the normal Vulkan memory allocator. Example:
-
-\code
-vmaVirtualFree(block, alloc);
-vmaDestroyVirtualBlock(block);
-\endcode
-
-\section virtual_allocator_allocation_parameters Allocation parameters
-
-You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
-Its default value is null.
-It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
-larger data structure containing more information. Example:
-
-\code
-struct CustomAllocData
-{
-    std::string m_AllocName;
-};
-CustomAllocData* allocData = new CustomAllocData();
-allocData->m_AllocName = "My allocation 1";
-vmaSetVirtualAllocationUserData(block, alloc, allocData);
-\endcode
-
-The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to the function
-vmaGetVirtualAllocationInfo() and inspecting the returned structure #VmaVirtualAllocationInfo.
-If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
-Example:
-
-\code
-VmaVirtualAllocationInfo allocInfo;
-vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
-delete (CustomAllocData*)allocInfo.pUserData;
-
-vmaVirtualFree(block, alloc);
-\endcode
-
-\section virtual_allocator_alignment_and_units Alignment and units
-
-It feels natural to express sizes and offsets in bytes.
-If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
-VmaVirtualAllocationCreateInfo::alignment to request it. Example:
-
-\code
-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.size = 4096; // 4 KB
-allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
-
-VmaVirtualAllocation alloc;
-res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
-\endcode
-
-Alignments of different allocations made from one block may vary.
-However, if all alignments and sizes are always a multiple of some basic size, e.g. 4 B or `sizeof(MyDataStruct)`,
-you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
-It might be more convenient, but then you need to use this new unit consistently in all the places:
-
-- VmaVirtualBlockCreateInfo::size
-- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
-- Offsets returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
-
-\section virtual_allocator_statistics Statistics
-
-You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
-(to get brief statistics that are fast to calculate)
-or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
-The functions fill the structures #VmaStatistics and #VmaDetailedStatistics, respectively - the same as used by the normal Vulkan memory allocator.
-Example:
-
-\code
-VmaStatistics stats;
-vmaGetVirtualBlockStatistics(block, &stats);
-printf("My virtual block has %llu bytes used by %u virtual allocations\n",
-    stats.allocationBytes, stats.allocationCount);
-\endcode
-
-You can also request a full list of allocations and free regions as a string in JSON format by calling
-vmaBuildVirtualBlockStatsString().
-The returned string must later be freed using vmaFreeVirtualBlockStatsString().
-The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
-
-\section virtual_allocator_additional_considerations Additional considerations
-
-The "virtual allocator" functionality is implemented on the level of individual memory blocks.
-Keeping track of a whole collection of blocks, allocating new ones when out of free space,
-deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
-
-Alternative allocation algorithms are supported, just like in custom pools of real GPU memory.
-See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
-You can find their description in chapter \ref custom_memory_pools.
-Allocation strategies are also supported.
-See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
-
-The following features are supported only by the allocator of real GPU memory and not by virtual allocations:
-buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
-
-
-\page debugging_memory_usage Debugging incorrect memory usage
-
-If you suspect a bug with memory usage, like usage of uninitialized memory or
-memory being overwritten out of bounds of an allocation,
-you can use debug features of this library to verify it.
-
-\section debugging_memory_usage_initialization Memory initialization
-
-If you experience a bug with incorrect and nondeterministic data in your program and you suspect that uninitialized memory is being used,
-you can enable automatic memory initialization to verify it.
-To do so, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
-
-\code
-#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
-#include "vk_mem_alloc.h"
-\endcode
-
-The memory of new allocations is then initialized to the bit pattern `0xDCDCDCDC`.
-Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
-Memory is automatically mapped and unmapped if necessary.
-
-If you find these values while debugging your program, chances are good that you incorrectly
-read Vulkan memory that is allocated but not initialized, or already freed, respectively.
-
-Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
-It also works with dedicated allocations.
-
-\section debugging_memory_usage_margins Margins
-
-By default, allocations are laid out in memory blocks next to each other if possible
-(considering the required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
-
-![Allocations without margin](../gfx/Margins_1.png)
-
-Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
-number of bytes as a margin after every allocation.
-
-\code
-#define VMA_DEBUG_MARGIN 16
-#include "vk_mem_alloc.h"
-\endcode
-
-![Allocations with margin](../gfx/Margins_2.png)
-
-If your bug goes away after enabling margins, it may be caused by memory
-being overwritten outside of allocation boundaries.
-It is not 100% certain though.
-A change in application behavior may also be caused by a different order and distribution
-of allocations across memory blocks after margins are applied.
-
-Margins work with all types of memory.
-
-The margin is applied only to allocations made out of memory blocks and not to dedicated
-allocations, which have their own memory block of a specific size.
-It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
-or those automatically placed in dedicated allocations, e.g. due to their
-large size or because the VK_KHR_dedicated_allocation extension recommended it.
-
-Margins appear in the [JSON dump](@ref statistics_json_dump) as part of free space.
-
-Note that enabling margins increases memory usage and fragmentation.
-
-Margins do not apply to the \ref virtual_allocator.
-
-\section debugging_memory_usage_corruption_detection Corruption detection
-
-You can additionally define the macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
-of the contents of the margins.
-
-\code
-#define VMA_DEBUG_MARGIN 16
-#define VMA_DEBUG_DETECT_CORRUPTION 1
-#include "vk_mem_alloc.h"
-\endcode
-
-When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
-(it must be a multiple of 4) after every allocation is filled with a magic number.
-This idea is also known as a "canary".
-Memory is automatically mapped and unmapped if necessary.
-
-This number is validated automatically when the allocation is destroyed.
-If it is not equal to the expected value, `VMA_ASSERT()` is executed.
-This clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
-which indicates a serious bug.
-
-You can also explicitly request checking the margins of all allocations in all memory blocks
-that belong to specified memory types by using the function vmaCheckCorruption(),
-or in memory blocks that belong to a specified custom pool, by using the function
-vmaCheckPoolCorruption().
-
-Margin validation (corruption detection) works only for memory types that are
-`HOST_VISIBLE` and `HOST_COHERENT`.
-
-
-\page opengl_interop OpenGL Interop
-
-VMA provides some features that help with interoperability with OpenGL.
-
-\section opengl_interop_exporting_memory Exporting memory
-
-If you want to attach a `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:
-
-It is recommended to create \ref custom_memory_pools for such allocations.
-Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
-while creating the custom pool.
-Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
-not only while creating it, as no copy of the structure is made -
-its original pointer is used for each allocation instead.
-
-If you want to export all memory allocated by the library from certain memory types,
-including dedicated allocations and other allocations made from the default pools,
-an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
-It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
-through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
-Please note that new versions of the library also support dedicated allocations created in custom pools.
-
-You should not mix these two methods in a way that would apply both to the same memory type.
-Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
-
-
-\section opengl_interop_custom_alignment Custom alignment
-
-Buffers or images exported to a different API like OpenGL may require a different alignment,
-higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
-To impose such an alignment:
-
-It is recommended to create \ref custom_memory_pools for such allocations.
-Set the VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
-to be made out of this pool.
-The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
-from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
-
-If you want to create a buffer with a specific minimum alignment out of the default pools,
-use the special function vmaCreateBufferWithAlignment(), which takes an additional parameter `minAlignment`.
-
-Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
-allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
-Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
-
-
-\page usage_patterns Recommended usage patterns
-
-Vulkan gives great flexibility in memory allocation.
-This chapter shows the most common patterns.
-
-See also slides from the talk:
-[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
-
-
-\section usage_patterns_gpu_only GPU-only resource
-
-When:
-Any resources that you frequently write and read on the GPU,
-e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
-images/buffers used as storage images/buffers (aka "Unordered Access View (UAV)").
-
-What to do:
-Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-
-\code
-VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
-imgCreateInfo.extent.width = 3840;
-imgCreateInfo.extent.height = 2160;
-imgCreateInfo.extent.depth = 1;
-imgCreateInfo.mipLevels = 1;
-imgCreateInfo.arrayLayers = 1;
-imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-allocCreateInfo.priority = 1.0f;
-
-VkImage img;
-VmaAllocation alloc;
-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-\endcode
-
-Also consider:
-Creating such resources as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
-especially if they are large or if you plan to destroy and recreate them with different sizes,
-e.g. when the display resolution changes.
-Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
-When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
-to decrease the chance of them being evicted to system memory by the operating system.
-
-\section usage_patterns_staging_copy_upload Staging copy for upload
-
-When:
-A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
-to some GPU resource.
-
-What to do:
-Use the flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-...
-
-memcpy(allocInfo.pMappedData, myData, myDataSize);
-\endcode
-
-Also consider:
-You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
-using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
-
-
-\section usage_patterns_readback Readback
-
-When:
-Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
-e.g. results of some computations.
-
-What to do:
-Use the flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-...
-
-const float* downloadedData = (const float*)allocInfo.pMappedData;
-\endcode
-
-
-\section usage_patterns_advanced_data_uploading Advanced data uploading
-
-For resources that you frequently write on the CPU via a mapped pointer and
-frequently read on the GPU, e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
-
--# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
-   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
-   and make the device reach out to that resource directly.
-   - Reads performed by the device will then go through the PCI Express bus.
-     The performance of this access may be limited, but it may be fine depending on the size
-     of this resource (whether it is small enough to quickly end up in the GPU cache) and the sparsity
-     of access.
--# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
-   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
-   (fast to access from the GPU). Then, it is likely the best choice for this type of resource.
--# Systems with a discrete graphics card and separate video memory may or may not expose
-   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
-   If they do, it represents a piece of VRAM (or the entire VRAM, if ReBAR is enabled in the motherboard BIOS)
-   that is available to the CPU for mapping.
-   - Writes performed by the host to that memory go through the PCI Express bus.
-     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
-     as long as the rules of using uncached and write-combined memory are followed: only sequential writes and no reads.
--# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
-   a separate "staging" copy in `HOST_VISIBLE` memory, and perform an explicit transfer command between them.
-
-Thankfully, VMA offers an aid to create and use such resources in the way optimal
-for the current Vulkan device. To help the library make the best choice,
-use the flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
-It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
-but if no such memory type is available or allocation from it fails
-(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in the BIOS),
-it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
-It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
-so you need to create another "staging" allocation and perform explicit transfers.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-VkMemoryPropertyFlags memPropFlags;
-vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
-
-if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-{
-    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
-
-    // [Executed in runtime]:
-    memcpy(allocInfo.pMappedData, myData, myDataSize);
-}
-else
-{
-    // Allocation ended up in a non-mappable memory - need to transfer.
-    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-    stagingBufCreateInfo.size = 65536;
-    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
-    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-        VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-    VkBuffer stagingBuf;
-    VmaAllocation stagingAlloc;
-    VmaAllocationInfo stagingAllocInfo;
-    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
-        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
-
-    // [Executed in runtime]:
-    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
-    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
-    VkBufferCopy bufCopy = {
-        0, // srcOffset
-        0, // dstOffset
-        myDataSize }; // size
-    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
-}
-\endcode
-
-\section usage_patterns_other_use_cases Other use cases
-
-Here are some other, less obvious use cases and their recommended settings:
-
-- An image that is used only as a transfer source and destination, but should stay on the device,
-  as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
-  for temporal antialiasing or other temporal effects.
-  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
-- An image that is used only as a transfer source and destination, but should be placed
-  in system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
-  least recently used textures from VRAM.
-  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
-    as VMA needs a hint here to differentiate it from the previous case.
-- A buffer that you want to map and write from the CPU and directly read from the GPU
-  (e.g. as a uniform or vertex buffer), where you have a clear preference to place it in device or
-  host memory due to its large size.
-  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
-  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
-
-
-\page configuration Configuration
-
-Please check the "CONFIGURATION SECTION" in the code to find macros that you can define
-before each include of this file, or change directly in this file, to provide
-your own implementation of basic facilities like assert, `min()` and `max()` functions,
-mutex, atomic, etc.
-The library uses its own implementation of containers by default, but you can switch to using
-STL containers instead.
-
-For example, define `VMA_ASSERT(expr)` before including the library to provide
-a custom implementation of the assertion, compatible with your project.
-By default it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
-and empty otherwise.
-
-\section config_Vulkan_functions Pointers to Vulkan functions
-
-There are multiple ways to import pointers to Vulkan functions in the library.
-In the simplest case you don't need to do anything.
-If the compilation or linking of your program or the initialization of the #VmaAllocator
-doesn't work for you, you can try to reconfigure it.
-
-First, the allocator tries to fetch pointers to Vulkan functions linked statically,
-like this:
-
-\code
-m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-\endcode
-
-If you want to disable this feature, set the configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
-
-Second, you can provide the pointers yourself by setting the member VmaAllocatorCreateInfo::pVulkanFunctions.
-You can fetch them e.g. using the functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
-by using a helper library like [volk](https://github.com/zeux/volk).
-
-Third, VMA tries to fetch the remaining pointers that are still null by calling
-`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
-You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
-Other pointers will be fetched automatically.
-If you want to disable this feature, set the configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
-
-Finally, all the function pointers required by the library (considering the selected
-Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to make sure they are not null.
-
-
-\section custom_memory_allocator Custom host memory allocator
-
-If you use a custom allocator for CPU memory rather than the default C++ operators `new`
-and `delete`, you can make this library use your allocator as well
-by filling in the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
-functions will be passed to Vulkan, as well as used by the library itself to
-make any CPU-side allocations.
-
-\section allocation_callbacks Device memory allocation callbacks
-
-The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
-You can set up callbacks to be informed about these calls, e.g. for the purpose
-of gathering some statistics. To do so, fill in the optional member
-VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
-
-\section heap_memory_limit Device heap memory limit
-
-When the device memory of a certain heap runs out of free space, new allocations may
-fail (returning an error code) or they may succeed, silently pushing some existing
-memory blocks from GPU VRAM to system RAM (which degrades performance). This
-behavior is implementation-dependent - it depends on the GPU vendor and graphics
-driver.
-
-On AMD cards it can be controlled while creating the Vulkan device object by using the
-VK_AMD_memory_overallocation_behavior extension, if available.
-
-Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
-memory available without switching your graphics card to one that really has
-smaller VRAM, you can use a feature of this library intended for this purpose.
-To do so, fill in the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
-
-
-
-\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
-
-VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
-performance on some GPUs. It augments the Vulkan API with the possibility to query
-the driver whether it prefers a particular buffer or image to have its own, dedicated
-allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
-to do some internal optimizations. The extension is supported by this library.
-It will be used automatically when enabled.
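-
-It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
-and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion, you are all set.
-A minimal sketch (assuming `physicalDevice`, `device`, and `instance` are your own,
-already created handles):
-
-\code
-VmaAllocatorCreateInfo allocatorInfo = {};
-allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
-allocatorInfo.physicalDevice = physicalDevice;
-allocatorInfo.device = device;
-allocatorInfo.instance = instance;
-
-VmaAllocator allocator;
-vmaCreateAllocator(&allocatorInfo, &allocator);
-\endcode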
-
-Otherwise, if you want to use it as an extension:
-
-1. When creating the Vulkan device, check if the following 2 device extensions are
-supported (call `vkEnumerateDeviceExtensionProperties()`).
-If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
-
-- VK_KHR_get_memory_requirements2
-- VK_KHR_dedicated_allocation
-
-If you enabled these extensions:
-
-2. Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
-your #VmaAllocator to inform the library that you enabled the required extensions
-and you want the library to use them.
-
-\code
-allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
-
-vmaCreateAllocator(&allocatorInfo, &allocator);
-\endcode
-
-That is all. The extension will be automatically used whenever you create a
-buffer using vmaCreateBuffer() or an image using vmaCreateImage().
-
-When using the extension together with the Vulkan validation layer, you will receive
-warnings like this:
-
-_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
-
-This is OK - just ignore it. It happens because you use the function
-`vkGetBufferMemoryRequirements2KHR()` instead of the standard
-`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
-unaware of it.
-
-To learn more about this extension, see:
-
-- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
-- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
-
-
-
-\page vk_ext_memory_priority VK_EXT_memory_priority
-
-VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
-value to Vulkan memory allocations, which the implementation may use to prefer certain
-buffers and images that are critical for performance to stay in device-local memory
-in cases when the memory is over-subscribed, while some others may be moved to system memory.
-
-VMA offers convenient usage of this extension.
-If you enable it, you can pass a "priority" parameter when creating allocations or custom pools
-and the library automatically passes the value to Vulkan using this extension.
-
-If you want to use this extension in connection with VMA, follow these steps:
-
-\section vk_ext_memory_priority_initialization Initialization
-
-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
-Attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
-
-3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-
-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-Enable this device feature - attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to the
-`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
-
-5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
-to VmaAllocatorCreateInfo::flags.
-
-\section vk_ext_memory_priority_usage Usage
-
-When using this extension, you should initialize the following members:
-
-- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-- VmaPoolCreateInfo::priority when creating a custom pool.
-
-It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
-Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
-so it has a lower chance of being pushed out to system memory and experiencing degraded performance.
-
-It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
-as dedicated allocations and set a high priority on them. For example:
-
-\code
-VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
-imgCreateInfo.extent.width = 3840;
-imgCreateInfo.extent.height = 2160;
-imgCreateInfo.extent.depth = 1;
-imgCreateInfo.mipLevels = 1;
-imgCreateInfo.arrayLayers = 1;
-imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-allocCreateInfo.priority = 1.0f;
-
-VkImage img;
-VmaAllocation alloc;
-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-\endcode
-
-The `priority` member is ignored in the following situations:
-
-- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
-  from the parameters passed in #VmaPoolCreateInfo when the pool was created.
-- Allocations created in default pools: They inherit the priority from the parameters
-  VMA used when creating the default pools, which means `priority == 0.5f`.
-
-
-\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
-
-VK_AMD_device_coherent_memory is a device extension that enables access to
-additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
-`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
-allocation of buffers intended for writing "breadcrumb markers" in between passes
-or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
-
-When the extension is available but has not been enabled, the Vulkan physical device
-still exposes those memory types, but their usage is forbidden. VMA automatically
-takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
-to allocate memory of such a type is made.
-
-If you want to use this extension in connection with VMA, follow these steps:
-
-\section vk_amd_device_coherent_memory_initialization Initialization
-
-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
-Attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
-
-3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-
-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-Enable this device feature - attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
-`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
-
-5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
-to VmaAllocatorCreateInfo::flags.
-
-\section vk_amd_device_coherent_memory_usage Usage
-
-After following the steps described above, you can create VMA allocations and custom pools
-out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
-devices. There are multiple ways to do it, for example:
-
-- You can request or prefer to allocate out of such memory types by adding
-  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
-  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
-  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
-- If you manually found a memory type index to use for this purpose, force allocation
-  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
-
-\section vk_amd_device_coherent_memory_more_information More information
-
-To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
-
-Example use of this extension can be found in the code of the sample and test suite
-accompanying this library.
-
-
-\page enabling_buffer_device_address Enabling buffer device address
-
-The device extension VK_KHR_buffer_device_address
-allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
-It has been promoted to core Vulkan 1.2.
-
-If you want to use this feature in connection with VMA, follow these steps:
-
-\section enabling_buffer_device_address_initialization Initialization
-
-1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if the returned array of `VkExtensionProperties` contains
-"VK_KHR_buffer_device_address".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
-Attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
- -3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add -"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. - -4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. -Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. -Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to -`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. - -5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you -have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT -to VmaAllocatorCreateInfo::flags. - -\section enabling_buffer_device_address_usage Usage - -After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. -The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to -allocated memory blocks wherever it might be needed. - -Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. -The second part of this functionality related to "capture and replay" is not supported, -as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. - -\section enabling_buffer_device_address_more_information More information - -To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) - -Example use of this extension can be found in the code of the sample and test suite -accompanying this library. - -\page general_considerations General considerations - -\section general_considerations_thread_safety Thread safety - -- The library has no global state, so separate #VmaAllocator objects can be used - independently. - There should be no need to create multiple such objects though - one per `VkDevice` is enough. -- By default, all calls to functions that take #VmaAllocator as first parameter - are safe to call from multiple threads simultaneously because they are - synchronized internally when needed. - This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. -- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT - flag, calls to functions that take such #VmaAllocator object must be - synchronized externally. -- Access to a #VmaAllocation object must be externally synchronized. For example, - you must not call vmaGetAllocationInfo() and vmaMapMemory() from different - threads at the same time if you pass the same #VmaAllocation object to these - functions. -- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. - -\section general_considerations_versioning_and_compatibility Versioning and compatibility - -The library uses [**Semantic Versioning**](https://semver.org/), -which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: - -- Incremented Patch version means a release is backward- and forward-compatible, - introducing only some internal improvements, bug fixes, optimizations etc. - or changes that are out of scope of the official API described in this documentation. 
-- Incremented Minor version means a release is backward-compatible,
-  so existing code that uses the library should continue to work, while some new
-  symbols could have been added: new structures, functions, new values in existing
-  enums and bit flags, new structure members, but not new function parameters.
-- Incremented Major version means a release could break some backward compatibility.
-
-All changes between official releases are documented in file "CHANGELOG.md".
-
-\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
-Adding new members to existing structures is treated as backward compatible if initializing
-the new members to binary zero results in the old behavior.
-You should always fully initialize all library structures to zeros and not rely on their
-exact binary size.
-
-\section general_considerations_validation_layer_warnings Validation layer warnings
-
-When using this library, you may encounter the following types of warnings issued by
-the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
-to just ignore them.
-
-- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
-  - It happens when VK_KHR_dedicated_allocation extension is enabled.
-    `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
-- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
-  - It happens when you map a buffer or image, because the library maps the entire
-    `VkDeviceMemory` block, where different types of images and buffers may end
-    up together, especially on GPUs with unified memory like Intel.
-- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
-  - It may happen when you use [defragmentation](@ref defragmentation).
-
-\section general_considerations_allocation_algorithm Allocation algorithm
-
-The library uses the following algorithm for allocation, in order:
-
--# Try to find a free range of memory in existing blocks.
--# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
--# If failed, try to create such a block with size / 2, size / 4, size / 8.
--# If failed, try to allocate a separate `VkDeviceMemory` for this allocation,
-   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--# If failed, choose another memory type that meets the requirements specified in
-   VmaAllocationCreateInfo and go to point 1.
--# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-
-\section general_considerations_features_not_supported Features not supported
-
-Features deliberately excluded from the scope of this library:
-
--# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
-   between CPU and GPU memory and related synchronization is responsibility of the user.
-   Defining some "texture" object that would automatically stream its data from a
-   staging copy in CPU memory to GPU memory would rather be a feature of another,
-   higher-level library implemented on top of VMA.
-   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
--# **Recreation of buffers and images.** Although the library has functions for
-   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
-   recreate these objects yourself after defragmentation.
That is because the big - structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in - #VmaAllocation object. --# **Handling CPU memory allocation failures.** When dynamically creating small C++ - objects in CPU memory (not Vulkan memory), allocation failures are not checked - and handled gracefully, because that would complicate code significantly and - is usually not needed in desktop PC applications anyway. - Success of an allocation is just checked with an assert. --# **Code free of any compiler warnings.** Maintaining the library to compile and - work correctly on so many different platforms is hard enough. Being free of - any warnings, on any version of any compiler, is simply not feasible. - There are many preprocessor macros that make some variables unused, function parameters unreferenced, - or conditional expressions constant in some configurations. - The code of this library should not be bigger or more complicated just to silence these warnings. - It is recommended to disable such warnings instead. --# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but - are not going to be included into this repository. -*/ +// +// Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +/** \mainpage Vulkan Memory Allocator + +Version 3.1.0 + +Copyright (c) 2017-2024 Advanced Micro Devices, Inc. All rights reserved. 
\n +License: MIT \n +See also: [product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/), +[repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + + +API documentation divided into groups: [Topics](topics.html) + +General documentation chapters: + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Copy functions](@ref memory_mapping_copy_functions) + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage resource_aliasing + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [When not to use custom pools](@ref custom_memory_pools_when_not_use) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - \subpage defragmentation + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage virtual_allocator + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - [Leak detection features](@ref debugging_memory_usage_leak_detection) + - \subpage other_api_interop +- \subpage usage_patterns + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) +- Extension support + - \subpage vk_khr_dedicated_allocation + - \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory +- \subpage general_considerations + - [Thread safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) + - [Validation layer 
warnings](@ref general_considerations_validation_layer_warnings)
+    - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+    - [Features not supported](@ref general_considerations_features_not_supported)
+
+\defgroup group_init Library initialization
+
+\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
+
+\defgroup group_alloc Memory allocation
+
+\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
+Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
+
+\defgroup group_virtual Virtual allocator
+
+\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
+for user-defined purpose without allocating any real GPU memory.
+
+\defgroup group_stats Statistics
+
+\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
+See documentation chapter: \ref statistics.
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <vulkan/vulkan.h>
+
+#if !defined(VMA_VULKAN_VERSION)
+    #if defined(VK_VERSION_1_3)
+        #define VMA_VULKAN_VERSION 1003000
+    #elif defined(VK_VERSION_1_2)
+        #define VMA_VULKAN_VERSION 1002000
+    #elif defined(VK_VERSION_1_1)
+        #define VMA_VULKAN_VERSION 1001000
+    #else
+        #define VMA_VULKAN_VERSION 1000000
+    #endif
+#endif
+
+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+    extern PFN_vkAllocateMemory vkAllocateMemory;
+    extern PFN_vkFreeMemory vkFreeMemory;
+    extern PFN_vkMapMemory vkMapMemory;
+    extern PFN_vkUnmapMemory vkUnmapMemory;
+    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+    extern PFN_vkBindBufferMemory vkBindBufferMemory;
+    extern PFN_vkBindImageMemory vkBindImageMemory;
+    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+    extern PFN_vkCreateBuffer vkCreateBuffer;
+    extern PFN_vkDestroyBuffer vkDestroyBuffer;
+    extern PFN_vkCreateImage vkCreateImage;
+    extern PFN_vkDestroyImage vkDestroyImage;
+    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+    #if VMA_VULKAN_VERSION >= 1001000
+        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
+    #endif // #if VMA_VULKAN_VERSION >= 1001000
+#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
+
+#if !defined(VMA_DEDICATED_ALLOCATION)
+    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+        #define VMA_DEDICATED_ALLOCATION 1
+    #else
+        #define VMA_DEDICATED_ALLOCATION 0
+    #endif
+#endif
+
+#if !defined(VMA_BIND_MEMORY2)
+    #if VK_KHR_bind_memory2
+        #define VMA_BIND_MEMORY2 1
+    #else
+        #define VMA_BIND_MEMORY2 0
+    #endif
+#endif
+
+#if !defined(VMA_MEMORY_BUDGET)
+    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 ||
VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. +#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. +#if !defined(VMA_MEMORY_PRIORITY) + #if VK_EXT_memory_priority + #define VMA_MEMORY_PRIORITY 1 + #else + #define VMA_MEMORY_PRIORITY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance4 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE4) + #if VK_KHR_maintenance4 + #define VMA_KHR_MAINTENANCE4 1 + #else + #define VMA_KHR_MAINTENANCE4 0 + #endif +#endif + +// Defined to 1 when VK_KHR_maintenance5 device extension is defined in Vulkan headers. +#if !defined(VMA_KHR_MAINTENANCE5) + #if VK_KHR_maintenance5 + #define VMA_KHR_MAINTENANCE5 1 + #else + #define VMA_KHR_MAINTENANCE5 0 + #endif +#endif + + +// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY) + #if VK_KHR_external_memory + #define VMA_EXTERNAL_MEMORY 1 + #else + #define VMA_EXTERNAL_MEMORY 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan +// structure that will be extended via the pNext chain. +#ifndef VMA_EXTENDS_VK_STRUCT + #define VMA_EXTENDS_VK_STRUCT(vkStruct) +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. 
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+#ifndef VMA_NOT_NULL
+    #ifdef __clang__
+        #define VMA_NOT_NULL _Nonnull
+    #else
+        #define VMA_NOT_NULL
+    #endif
+#endif
+
+// If non-dispatchable handles are represented as pointers then we can give
+// them nullability annotations
+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+    #else
+        #define VMA_NOT_NULL_NON_DISPATCHABLE
+    #endif
+#endif
+
+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+    #else
+        #define VMA_NULLABLE_NON_DISPATCHABLE
+    #endif
+#endif
+
+#ifndef VMA_STATS_STRING_ENABLED
+    #define VMA_STATS_STRING_ENABLED 1
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+// INTERFACE
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
+#ifndef _VMA_ENUM_DECLARATIONS
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits
+{
+    /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+    Using this flag may increase performance because internal mutexes are not used.
+    */
+    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    Using this extension will automatically allocate dedicated blocks of memory for
+    some buffers and images instead of suballocating place for them out of bigger
+    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+    flag) when it is recommended by the driver. It may improve performance on some
+    GPUs.
+
+    You may set this flag only if you found out that the following device extensions are
+    supported, you enabled them while creating Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+    - VK_KHR_get_memory_requirements2 (device extension)
+    - VK_KHR_dedicated_allocation (device extension)
+
+    When this flag is set, you can experience the following warnings reported by the
+    Vulkan validation layer. You can ignore them.
+
+    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+    */
+    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+    /**
+    Enables usage of VK_KHR_bind_memory2 extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    You may set this flag only if you found out that this device extension is supported,
+    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    and you want it to be used internally by this library.
+
+    The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
+    which allow passing a chain of `pNext` structures while binding.
+    This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
+    */
+    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
+    /**
+    Enables usage of VK_EXT_memory_budget extension.
+
+    You may set this flag only if you found out that this device extension is supported,
+    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    and you want it to be used internally by this library, along with another instance extension
+    VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
+
+    The extension provides a query for current memory usage and budget, which will probably
+    be more accurate than an estimation used by the library otherwise.
+    */
+    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
+    /**
+    Enables usage of VK_AMD_device_coherent_memory extension.
+
+    You may set this flag only if you:
+
+    - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
+    - want it to be used internally by this library.
+
+    The extension and accompanying device feature provide access to memory types with
+    `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
+    They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
+
+    When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
+    To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
+    returning `VK_ERROR_FEATURE_NOT_PRESENT`.
+    */
+    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
+    /**
+    Enables usage of the "buffer device address" feature, which allows you to use the function
+    `vkGetBufferDeviceAddress*` to get a raw GPU pointer to a buffer and pass it for usage inside a shader.
+
+    You may set this flag only if you:
+
+    1. (For Vulkan version < 1.2) Found as available and enabled device extension
+    VK_KHR_buffer_device_address.
+    This extension is promoted to core Vulkan 1.2.
+    2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
+
+    When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
+    The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
+    allocated memory blocks wherever it might be needed.
+
+    For more information, see documentation chapter \ref enabling_buffer_device_address.
+    */
+    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
+    /**
+    Enables usage of VK_EXT_memory_priority extension in the library.
+ + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + /** + Enables usage of VK_KHR_maintenance4 extension in the library. + + You may set this flag only if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT = 0x00000080, + /** + Enables usage of VK_KHR_maintenance5 extension in the library. + + You should set this flag if you found available and enabled this device extension, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + */ + VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT = 0x00000100, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. +typedef VkFlags VmaAllocatorCreateFlags; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. 
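+
+    As a minimal illustrative sketch (not a normative part of this documentation;
+    `allocator` and `imgCreateInfo` are assumed to exist, with `imgCreateInfo.usage`
+    containing `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`):
+
+    \code
+    // Sketch only: allocator and imgCreateInfo are assumed to be created elsewhere.
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED;
+
+    VkImage img;
+    VmaAllocation alloc;
+    vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+    \endcode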
+ */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits +{ + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. + + If you use this flag while creating a buffer or an image, `VkMemoryDedicatedAllocateInfo` + structure is applied if possible. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. 
This is
+    useful if you need an allocation that is efficient to use on GPU
+    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+    support it (e.g. Intel GPU).
+    */
+    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+    /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
+
+    Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
+    null-terminated string. Instead of copying pointer value, a local copy of the
+    string is made and stored in allocation's `pName`. The string is automatically
+    freed together with the allocation. It is also used in vmaBuildStatsString().
+    */
+    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+    /** Allocation will be created from upper stack in a double stack pool.
+
+    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+    */
+    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+    /** Create both buffer/image and allocation, but don't bind them together.
+    It is useful when you want to do the binding yourself in order to perform some more advanced binding, e.g. using some extensions.
+    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+    Otherwise it is ignored.
+
+    If you want to make sure the new buffer/image is not tied to the new memory allocation
+    through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
+    use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
+    */
+    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+    /** Create allocation only if additional device memory required for it, if any, won't exceed
+    memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    */
+    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
+    /** \brief Set this flag if the allocated memory will have aliasing resources.
+
+    Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
+    Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
+    */
+    VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
+    /**
+    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+
+    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
+      you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
+    - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
+      This includes allocations created in \ref custom_memory_pools.
+
+    Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
+    never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
+
+    \warning Violating this declaration may work correctly, but will likely be very slow.
+    Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
+    Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
+    */
+    VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
+    /**
+    Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+
+    - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
+      you must use this flag to be able to map the allocation.
Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is preferred. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recommended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. +typedef VkFlags VmaAllocationCreateFlags; + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits +{ + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. 
+
+    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
+    exact type of such allocations is not known, so allocator must be conservative
+    in handling Buffer-Image Granularity, which can lead to suboptimal allocation
+    (wasted memory). In that case, if you can make sure you always allocate only
+    buffers and linear images or only optimal images out of this pool, use this flag
+    to make allocator disregard Buffer-Image Granularity and so make allocations
+    faster and more optimal.
+    */
+    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+
+    /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+    Specify this flag to enable linear allocation algorithm, which always creates
+    new allocations after last one and doesn't reuse space from allocations freed in
+    between. It trades memory consumption for simplified algorithm and data
+    structure, which has better performance and uses less memory for metadata.
+
+    By using this flag, you can achieve behavior of free-at-once, stack,
+    ring buffer, and double stack.
+    For details, see documentation chapter \ref linear_algorithm.
+    */
+    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    */
+    VMA_POOL_CREATE_ALGORITHM_MASK =
+        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
+
+    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
+typedef VkFlags VmaPoolCreateFlags;
+
+/// Flags to be passed as VmaDefragmentationInfo::flags.
+typedef enum VmaDefragmentationFlagBits
+{
+    /** \brief Use simple but fast algorithm for defragmentation.
+    May not achieve best results but will require least time to compute and least allocations to copy.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
+    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
+    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
+    /** \brief Perform full defragmentation of memory.
+    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
+    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
+    Only available when bufferImageGranularity is greater than 1, since it aims to reduce
+    alignment issues between different types of resources.
+    Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
+
+    /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
+
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+/// See #VmaDefragmentationFlagBits.
+typedef VkFlags VmaDefragmentationFlags;
+
+/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
+typedef enum VmaDefragmentationMoveOperation
+{
+    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed.
`srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, + /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, +} VmaDefragmentationMoveOperation; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. +typedef enum VmaVirtualBlockCreateFlagBits +{ + /** \brief Enables alternative, linear allocation algorithm in this virtual block. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, + + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualBlockCreateFlagBits; +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. +typedef VkFlags VmaVirtualBlockCreateFlags; + +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. +typedef enum VmaVirtualAllocationCreateFlagBits +{ + /** \brief Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualAllocationCreateFlagBits; +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. 
+typedef VkFlags VmaVirtualAllocationCreateFlags;
+
+/** @} */
+
+#endif // _VMA_ENUM_DECLARATIONS
+
+#ifndef _VMA_DATA_TYPES_DECLARATIONS
+
+/**
+\addtogroup group_init
+@{ */
+
+/** \struct VmaAllocator
+\brief Represents the main object of this library.
+
+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
+Call function vmaDestroyAllocator() to destroy it.
+
+It is recommended to create just one object of this type per `VkDevice` object,
+right after Vulkan is initialized, and keep it alive until before the Vulkan device is destroyed.
+*/
+VK_DEFINE_HANDLE(VmaAllocator)
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \struct VmaPool
+\brief Represents a custom memory pool.
+
+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
+Call function vmaDestroyPool() to destroy it.
+
+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+*/
+VK_DEFINE_HANDLE(VmaPool)
+
+/** \struct VmaAllocation
+\brief Represents a single memory allocation.
+
+It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type,
+plus a unique offset.
+
+There are multiple ways to create such an object.
+You need to fill structure VmaAllocationCreateInfo.
+For more information see [Choosing memory type](@ref choosing_memory_type).
+
+Although the library provides convenience functions that create a Vulkan buffer or image,
+allocate memory for it, and bind them together,
+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+An allocation object can exist without a buffer/image bound to it,
+binding can be done manually by the user, and destruction of the buffer/image can be done
+independently of destruction of the allocation.
+
+The object also remembers its size and some other information.
+To retrieve this information, use function vmaGetAllocationInfo() and inspect
+the returned structure VmaAllocationInfo.
+*/
+VK_DEFINE_HANDLE(VmaAllocation)
+
+/** \struct VmaDefragmentationContext
+\brief An opaque object that represents a started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
+Call function vmaEndDefragmentation() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \struct VmaVirtualAllocation
+\brief Represents a single memory allocation done inside VmaVirtualBlock.
+
+Use it as a unique identifier of a virtual allocation within a single block.
+
+Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
+*/
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \struct VmaVirtualBlock
+\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
+
+Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
+For more information, see documentation chapter \ref virtual_allocator.
+
+This object is not thread-safe - it should not be used from multiple threads simultaneously and must be synchronized externally.
+*/
+VK_DEFINE_HANDLE(VmaVirtualBlock)
+
+/** @} */
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Callback function called after successful vkAllocateMemory.
+typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/// Callback function called before vkFreeMemory. +typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. +*/ +typedef struct VmaDeviceMemoryCallbacks +{ + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. +*/ +typedef struct VmaVulkanFunctions +{ + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
+    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+    /// Fetch from "vkGetPhysicalDeviceMemoryProperties2" on Vulkan >= 1.1, but you can also fetch it from "vkGetPhysicalDeviceMemoryProperties2KHR" if you enabled extension VK_KHR_get_physical_device_properties2.
+    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000
+    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceBufferMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
+    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceImageMemoryRequirementsKHR VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+#endif
+} VmaVulkanFunctions;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkDevice VMA_NOT_NULL device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+    maximum number of bytes that can be allocated out of particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with a
+    smaller amount of memory, because the graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
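+
+    A minimal sketch of using this member (illustrative only; `memProps` and
+    `heapSizeLimits` are local helper variables introduced for this example, and
+    `heapSizeLimits` is assumed to stay alive at least until vmaCreateAllocator() returns):
+
+    \code
+    // Sketch: cap heap 0 at 1 GiB, leave the remaining heaps unlimited.
+    VkPhysicalDeviceMemoryProperties memProps;
+    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);
+
+    std::vector<VkDeviceSize> heapSizeLimits(memProps.memoryHeapCount, VK_WHOLE_SIZE);
+    heapSizeLimits[0] = 1ull << 30; // 1 GiB
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    // ... fill the other members as usual ...
+    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimits.data();
+    \endcode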
+ */ + const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; + + /** \brief Pointers to Vulkan functions. Can be null. + + For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). + */ + const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; + /** \brief Handle to Vulkan instance object. + + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. Vulkan version that the application uses. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + It must match the Vulkan version used by the application and supported on the selected physical device, + so it must be no higher than `VkApplicationInfo::apiVersion` passed to `vkCreateInstance` + and no higher than `VkPhysicalDeviceProperties::apiVersion` found on the physical device used. + */ + uint32_t vulkanApiVersion; +#if VMA_EXTERNAL_MEMORY + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. + + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; +#endif // #if VMA_EXTERNAL_MEMORY +} VmaAllocatorCreateInfo; + +/// Information about existing #VmaAllocator object. +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. +See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics +{ + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. 
+
+ \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
+ (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
+ "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such a block, usually for a single buffer or image.
+ */
+ VkDeviceSize blockBytes;
+ /** \brief Total number of bytes occupied by all #VmaAllocation objects.
+
+ Always less than or equal to `blockBytes`.
+ The difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
+ but unused by any #VmaAllocation.
+ */
+ VkDeviceSize allocationBytes;
+} VmaStatistics;
+
+/** \brief More detailed statistics than #VmaStatistics.
+
+These are slower to calculate. Use for debugging purposes.
+See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
+
+The previous version of the statistics API provided averages, but they have been removed
+because they can be easily calculated as:
+
+\code
+VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
+VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
+VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
+\endcode
+*/
+typedef struct VmaDetailedStatistics
+{
+ /// Basic statistics.
+ VmaStatistics statistics;
+ /// Number of free ranges of memory between allocations.
+ uint32_t unusedRangeCount;
+ /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
+ VkDeviceSize allocationSizeMin;
+ /// Largest allocation size. 0 if there are 0 allocations.
+ VkDeviceSize allocationSizeMax;
+ /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
+ VkDeviceSize unusedRangeSizeMin;
+ /// Largest empty range size. 0 if there are 0 empty ranges.
+ VkDeviceSize unusedRangeSizeMax;
+} VmaDetailedStatistics;
+
+/** \brief General statistics from the current state of the Allocator -
+total memory usage across all memory heaps and types.
+
+These are slower to calculate. Use for debugging purposes.
+See function vmaCalculateStatistics().
+*/
+typedef struct VmaTotalStatistics
+{
+ VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
+ VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
+ VmaDetailedStatistics total;
+} VmaTotalStatistics;
+
+/** \brief Statistics of current memory usage and available budget for a specific memory heap.
+
+These are fast to calculate.
+See function vmaGetHeapBudgets().
+*/
+typedef struct VmaBudget
+{
+ /** \brief Statistics fetched from the library.
+ */
+ VmaStatistics statistics;
+ /** \brief Estimated current memory usage of the program, in bytes.
+
+ Fetched from the system using the VK_EXT_memory_budget extension if enabled.
+
+ It might be different from `statistics.blockBytes` (usually higher) due to additional implicit objects
+ also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
+ `VkDeviceMemory` blocks allocated outside of this library, if any.
+ */
+ VkDeviceSize usage;
+ /** \brief Estimated amount of memory available to the program, in bytes.
+
+ Fetched from the system using the VK_EXT_memory_budget extension if enabled.
+
+ It might be different (most probably smaller) from `VkMemoryHeap::size[heapIndex]` due to factors
+ external to the program, decided by the operating system.
+ The difference `budget - usage` is the amount of additional memory that can probably
+ be allocated without problems. Exceeding the budget may result in various problems.
+ */
+ VkDeviceSize budget;
+} VmaBudget;
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \brief Parameters of a new #VmaAllocation.
+
+To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
+*/
+typedef struct VmaAllocationCreateInfo
+{
+ /// Use #VmaAllocationCreateFlagBits enum.
+ VmaAllocationCreateFlags flags;
+ /** \brief Intended usage of memory.
+
+ You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+ If `pool` is not null, this member is ignored.
+ */
+ VmaMemoryUsage usage;
+ /** \brief Flags that must be set in a memory type chosen for an allocation.
+
+ Leave 0 if you specify memory requirements in another way. \n
+ If `pool` is not null, this member is ignored. */
+ VkMemoryPropertyFlags requiredFlags;
+ /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+
+ Set to 0 if no additional flags are preferred. \n
+ If `pool` is not null, this member is ignored. */
+ VkMemoryPropertyFlags preferredFlags;
+ /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+
+ Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+ it meets other requirements specified by this structure, with no further
+ restrictions on memory type index. \n
+ If `pool` is not null, this member is ignored.
+ */
+ uint32_t memoryTypeBits;
+ /** \brief Pool that this allocation should be created in.
+
+ Leave `VK_NULL_HANDLE` to allocate from the default pool. If not null, members:
+ `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+ */
+ VmaPool VMA_NULLABLE pool;
+ /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+
+ If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+ null or a pointer to a null-terminated string. The string will then be copied to an
+ internal buffer, so it doesn't need to be valid after the allocation call.
+ */
+ void* VMA_NULLABLE pUserData;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+
+ It is used only when the #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
+ and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+ Otherwise, it has the priority of the memory block where it is placed and this variable is ignored.
+ */
+ float priority;
+} VmaAllocationCreateInfo;
+
+/// Describes parameters of a created #VmaPool.
+typedef struct VmaPoolCreateInfo
+{
+ /** \brief Vulkan memory type index to allocate this pool from.
+ */
+ uint32_t memoryTypeIndex;
+ /** \brief Use combination of #VmaPoolCreateFlagBits.
+ */
+ VmaPoolCreateFlags flags;
+ /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+
+ Specify nonzero to set an explicit, constant size of memory blocks used by this
+ pool.
+
+ Leave 0 to use the default and let the library manage block sizes automatically.
+ Sizes of particular blocks may vary.
+ In this case, the pool will also support dedicated allocations.
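+
+ For example, a hedged sketch of creating a pool with a fixed block size (the memory
+ type index `memTypeIndex` is assumed to be found beforehand, e.g. with
+ vmaFindMemoryTypeIndexForBufferInfo()):
+
+ \code
+ VmaPoolCreateInfo poolCreateInfo = {};
+ poolCreateInfo.memoryTypeIndex = memTypeIndex;
+ poolCreateInfo.blockSize = 64ull * 1024 * 1024; // explicit, constant 64 MiB blocks
+ poolCreateInfo.minBlockCount = 1;               // keep one block alive even when empty
+
+ VmaPool pool;
+ VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+ \endcode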
+ */
+ VkDeviceSize blockSize;
+ /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+
+ Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+ */
+ size_t minBlockCount;
+ /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+
+ Set to 0 to use the default, which is `SIZE_MAX`, which means no limit.
+
+ Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
+ throughout the whole lifetime of this pool.
+ */
+ size_t maxBlockCount;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
+
+ It is used only when the #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
+ Otherwise, this variable is ignored.
+ */
+ float priority;
+ /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
+
+ Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
+ It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not enough,
+ e.g. when doing interop with OpenGL.
+ */
+ VkDeviceSize minAllocationAlignment;
+ /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
+
+ Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
+ It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
+ Structures pointed to by this member must remain alive and unchanged for the whole lifetime of the custom pool.
+
+ Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
+ can be attached automatically by this library when using other, more convenient features of this library.
+ */
+ void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
+} VmaPoolCreateInfo;
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/**
+Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
+
+There is also an extended version of this structure that carries additional parameters: #VmaAllocationInfo2.
+*/
+typedef struct VmaAllocationInfo
+{
+ /** \brief Memory type index that this allocation was allocated from.
+
+ It never changes.
+ */
+ uint32_t memoryType;
+ /** \brief Handle to Vulkan memory object.
+
+ The same memory object can be shared by multiple allocations.
+
+ It can change after the allocation is moved during \ref defragmentation.
+ */
+ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
+ /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
+
+ You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
+ vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
+ not the entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
+ and apply this offset automatically.
+
+ It can change after the allocation is moved during \ref defragmentation.
+ */
+ VkDeviceSize offset;
+ /** \brief Size of this allocation, in bytes.
+ + It never changes. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. + + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after the allocation is moved during \ref defragmentation. + */ + void* VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). + + It can change after call to vmaSetAllocationName() for this allocation. + + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. + */ + const char* VMA_NULLABLE pName; +} VmaAllocationInfo; + +/// Extended parameters of a #VmaAllocation object that can be retrieved using function vmaGetAllocationInfo2(). +typedef struct VmaAllocationInfo2 +{ + /** \brief Basic parameters of the allocation. + + If you need only these, you can use function vmaGetAllocationInfo() and structure #VmaAllocationInfo instead. + */ + VmaAllocationInfo allocationInfo; + /** \brief Size of the `VkDeviceMemory` block that the allocation belongs to. + + In case of an allocation with dedicated memory, it will be equal to `allocationInfo.size`. + */ + VkDeviceSize blockSize; + /** \brief `VK_TRUE` if the allocation has dedicated memory, `VK_FALSE` if it was placed as part of a larger memory block. + + When `VK_TRUE`, it also means `VkMemoryDedicatedAllocateInfo` was used when creating the allocation + (if VK_KHR_dedicated_allocation extension or Vulkan version >= 1.1 is enabled). + */ + VkBool32 dedicatedMemory; +} VmaAllocationInfo2; + +/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass. + +Should return true if the defragmentation needs to stop current pass. +*/ +typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData); + +/** \brief Parameters for defragmentation. + +To be used with function vmaBeginDefragmentation(). +*/ +typedef struct VmaDefragmentationInfo +{ + /// \brief Use combination of #VmaDefragmentationFlagBits. + VmaDefragmentationFlags flags; + /** \brief Custom pool to be defragmented. + + If null then default pools will undergo defragmentation process. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. + + `0` means no limit. + */ + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. + + `0` means no limit. + */ + uint32_t maxAllocationsPerPass; + /** \brief Optional custom callback for stopping vmaBeginDefragmentation(). 
+
+ It has to return true to break the current defragmentation pass.
+ */
+ PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
+ /// \brief Optional data to pass to the custom callback for stopping a defragmentation pass.
+ void* VMA_NULLABLE pBreakCallbackUserData;
+} VmaDefragmentationInfo;
+
+/// Single move of an allocation to be done for defragmentation.
+typedef struct VmaDefragmentationMove
+{
+ /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
+ VmaDefragmentationMoveOperation operation;
+ /// Allocation that should be moved.
+ VmaAllocation VMA_NOT_NULL srcAllocation;
+ /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
+
+ \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
+ to be used for binding the new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
+ vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
+ */
+ VmaAllocation VMA_NOT_NULL dstTmpAllocation;
+} VmaDefragmentationMove;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassMoveInfo
+{
+ /// Number of elements in the `pMoves` array.
+ uint32_t moveCount;
+ /** \brief Array of moves to be performed by the user in the current defragmentation pass.
+
+ Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
+
+ For each element, you should:
+
+ 1. Create a new buffer/image in the place pointed to by VmaDefragmentationMove::dstTmpAllocation.
+ 2. Copy data from VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
+ 3. Make sure these commands finished executing on the GPU.
+ 4. Destroy the old buffer/image.
+
+ Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
+ After this call, the allocation will point to the new place in memory.
+
+ Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+
+ Alternatively, if you decide you want to completely remove the allocation:
+
+ 1. Destroy its buffer/image.
+ 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+
+ Then, after vmaEndDefragmentationPass() the allocation will be freed.
+ */
+ VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassMoveInfo;
+
+/// Statistics returned for the defragmentation process by function vmaEndDefragmentation().
+typedef struct VmaDefragmentationStats
+{
+ /// Total number of bytes that have been copied while moving allocations to different places.
+ VkDeviceSize bytesMoved;
+ /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+ VkDeviceSize bytesFreed;
+ /// Number of allocations that have been moved to different places.
+ uint32_t allocationsMoved;
+ /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+ uint32_t deviceMemoryBlocksFreed; +} VmaDefragmentationStats; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). +typedef struct VmaVirtualBlockCreateInfo +{ + /** \brief Total size of the virtual block. + + Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. + For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + */ + VkDeviceSize size; + + /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. + */ + VmaVirtualBlockCreateFlags flags; + + /** \brief Custom CPU memory allocation callbacks. Optional. + + Optional, can be null. When specified, they will be used for all CPU-side memory allocations. + */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; +} VmaVirtualBlockCreateInfo; + +/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). +typedef struct VmaVirtualAllocationCreateInfo +{ + /** \brief Size of the allocation. + + Cannot be zero. + */ + VkDeviceSize size; + /** \brief Required alignment of the allocation. Optional. + + Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. + */ + VkDeviceSize alignment; + /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. + */ + VmaVirtualAllocationCreateFlags flags; + /** \brief Custom pointer to be associated with the allocation. Optional. + + It can be any value and can be used for user-defined purposes. It can be fetched or changed later. + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationCreateInfo; + +/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). +typedef struct VmaVirtualAllocationInfo +{ + /** \brief Offset of the allocation. + + Offset at which the allocation was made. + */ + VkDeviceSize offset; + /** \brief Size of the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::size. + */ + VkDeviceSize size; + /** \brief Custom pointer associated with the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationInfo; + +/** @} */ + +#endif // _VMA_DATA_TYPES_DECLARATIONS + +#ifndef _VMA_FUNCTION_HEADERS + +/** +\addtogroup group_init +@{ +*/ + +/// Creates #VmaAllocator object. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); + +/// Destroys allocator object. +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); + +/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. + +It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to +`VkPhysicalDevice`, `VkDevice` etc. every time using this function. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); + +/** +PhysicalDeviceProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); + +/** +PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); + +/** +\brief Given Memory Type Index, returns Property Flags of this memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + +/** \brief Sets index of the current frame. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. Use it for debugging purposes. +For faster but more brief statistics suitable to be called every frame or every allocation, +use vmaGetHeapBudgets(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaTotalStatistics* VMA_NOT_NULL pStats); + +/** \brief Retrieves information about current memory usage and budget for all memory heaps. + +\param allocator +\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. + +This algorithm tries to find a memory type that: + +- Is allowed by memoryTypeBits. +- Contains all the flags from pAllocationCreateInfo->requiredFlags. +- Matches intended usage. +- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. + +\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result +from this function or any other allocating function probably means that your +device doesn't support any memory type with requested features for the specific +type of resource you want to use it for. Please check parameters of your +resource, like image layout (OPTIMAL versus LINEAR) or mip level count. 
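+
+A hedged usage sketch (the host-visible requirement is chosen purely for illustration;
+`UINT32_MAX` as `memoryTypeBits` allows all memory types):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
+\endcode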
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy buffer that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy image that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +\param allocator Allocator object. +\param pCreateInfo Parameters of pool to create. +\param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaStatistics* VMA_NOT_NULL pPoolStats); + +/** \brief Retrieves detailed statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. 
+ `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE* VMA_NOT_NULL ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE pName); + +/** \brief General purpose memory allocation. + +\param allocator +\param pVkMemoryRequirements +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), +vmaCreateBuffer(), vmaCreateImage() instead whenever possible. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief General purpose memory allocation for multiple allocation objects at once. + +\param allocator Allocator object. +\param pVkMemoryRequirements Memory requirements for each allocation. +\param pCreateInfo Creation parameters for each allocation. +\param allocationCount Number of allocations to make. +\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. +\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. +It is just a general purpose allocation function able to make multiple allocations at once. +It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + +All allocations are made using same parameters. All of them are created out of the same memory pool and type. +If any allocation fails, all allocations already made within this function call are also freed, so that when +returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. 
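+
+A hedged sketch of allocating three pages with identical parameters (`memReqs` is assumed to be
+filled by the application, e.g. from `vkGetBufferMemoryRequirements`; the flag is illustrative):
+
+\code
+VkMemoryRequirements memReqs[3] = {}; // filled by the application beforehand
+VmaAllocationCreateInfo createInfos[3] = {};
+for(size_t i = 0; i < 3; ++i)
+    createInfos[i].requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+VmaAllocation allocs[3];
+VkResult res = vmaAllocateMemoryPages(allocator, memReqs, createInfos, 3, allocs, nullptr);
+\endcode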
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+ size_t allocationCount,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+ VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkBuffer`.
+
+\param allocator
+\param buffer
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateBuffer().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in the `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of the `pAllocations` array is valid. Such entries are just skipped.
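+
+For example (a sketch; `allocs` is assumed to hold `allocCount` allocations, some of which may be `VK_NULL_HANDLE`):
+
+\code
+vmaFreeMemoryPages(allocator, allocCount, allocs);
+\endcode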
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+ VmaAllocator VMA_NOT_NULL allocator,
+ size_t allocationCount,
+ const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Returns current information about specified allocation.
+
+Current parameters of the given allocation are returned in `pAllocationInfo`.
+
+This function doesn't lock any mutex, so it should be quite efficient;
+still, you should avoid calling it too often.
+You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
+vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure the parameters don't change
+(e.g. due to defragmentation).
+
+There is also a new function vmaGetAllocationInfo2() that offers extended information
+about the allocation, returned using the new structure #VmaAllocationInfo2.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Returns extended information about specified allocation.
+
+Current parameters of the given allocation are returned in `pAllocationInfo`.
+Extended parameters in structure #VmaAllocationInfo2 include memory block size
+and a flag telling whether the allocation has dedicated memory.
+It can be useful e.g. for interop with OpenGL.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VmaAllocationInfo2* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+The value of pointer `pUserData` is copied to the allocation's `pUserData`.
+It is opaque, so you can use it however you want - e.g.
+as a pointer, an ordinal number, or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE pUserData);
+
+/** \brief Sets pName in given allocation to new value.
+
+`pName` must be either null or a pointer to a null-terminated string. The function
+makes a local copy of the string and sets it as the allocation's `pName`. The string
+passed as pName doesn't need to be valid for the whole lifetime of the allocation -
+you can free it after this call. The string previously pointed to by the allocation's
+`pName` is freed from memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ const char* VMA_NULLABLE pName);
+
+/**
+\brief Given an allocation, returns Property Flags of its memory type.
+
+This is just a convenience function. The same information can be obtained using
+vmaGetAllocationInfo() + vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by the given allocation to make it accessible to CPU code.
+On success, `*ppData` contains a pointer to the first byte of this memory.
+
+\warning
+If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
+correctly offset to the beginning of the region assigned to this particular allocation.
+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+You should not add VmaAllocationInfo::offset to it!
+
+Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. The actual Vulkan memory will be mapped on the first
+mapping and unmapped on the last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on an allocation created with the
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() the same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
+The unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
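+
+A minimal hedged sketch of the typical map-write-flush sequence (`alloc`, `srcData`, and
+`dataSize` are assumed to exist; `alloc` lives in `HOST_VISIBLE` memory that may not be
+`HOST_COHERENT`):
+
+\code
+void* mapped;
+VkResult res = vmaMapMemory(allocator, alloc, &mapped);
+if(res == VK_SUCCESS)
+{
+    memcpy(mapped, srcData, (size_t)dataSize);
+    res = vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // flush the whole allocation
+    vmaUnmapMemory(allocator, alloc);
+}
+\endcode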
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
+The map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given ranges of the given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given ranges of the given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
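+
+For example (a sketch; null `offsets`/`sizes` mean offset 0 and `VK_WHOLE_SIZE` for every allocation):
+
+\code
+vmaInvalidateAllocations(allocator, allocCount, allocs, nullptr, nullptr);
+\endcode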
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Maps the allocation temporarily if needed, copies data from a specified host pointer to it, and flushes the memory from the host caches if needed.
+
+\param allocator
+\param pSrcHostPointer Pointer to the host data that becomes the source of the copy.
+\param dstAllocation Handle to the allocation that becomes the destination of the copy.
+\param dstAllocationLocalOffset Offset within `dstAllocation` where to write copied data, in bytes.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows you to copy data from a host pointer to an allocation easily.
+The same behavior can be achieved by calling vmaMapMemory(), `memcpy()`, vmaUnmapMemory(), vmaFlushAllocation().
+
+This function can be called only for allocations created in a memory type that has the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function will fail and generate a Validation Layers error.
+
+`dstAllocationLocalOffset` is relative to the contents of the given `dstAllocation`.
+If you mean the whole allocation, you should pass 0.
+Do not pass the allocation's offset within the device memory block as this parameter!
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pSrcHostPointer,
+ VmaAllocation VMA_NOT_NULL dstAllocation,
+ VkDeviceSize dstAllocationLocalOffset,
+ VkDeviceSize size);
+
+/** \brief Invalidates memory in the host caches if needed, maps the allocation temporarily if needed, and copies data from it to a specified host pointer.
+
+\param allocator
+\param srcAllocation Handle to the allocation that becomes the source of the copy.
+\param srcAllocationLocalOffset Offset within `srcAllocation` where to read copied data, in bytes.
+\param pDstHostPointer Pointer to the host memory that becomes the destination of the copy.
+\param size Number of bytes to copy.
+
+This is a convenience function that allows you to copy data from an allocation to a host pointer easily.
+The same behavior can be achieved by calling vmaInvalidateAllocation(), vmaMapMemory(), `memcpy()`, vmaUnmapMemory().
+
+This function should be called only for allocations created in a memory type that has the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT` flags.
+It can be ensured e.g. by using #VMA_MEMORY_USAGE_AUTO and #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Otherwise, the function may fail and generate a Validation Layers error.
+It may also work very slowly when reading from uncached memory.
+
+`srcAllocationLocalOffset` is relative to the contents of the given `srcAllocation`.
+If you mean the whole allocation, you should pass 0.
+Do not pass the allocation's offset within the device memory block as this parameter!
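+
+A hedged sketch of a round trip using both convenience functions (`alloc` is assumed to be
+`HOST_VISIBLE` and, for the read-back, `HOST_CACHED`; buffer sizes are illustrative):
+
+\code
+float inData[64] = {}; // source data filled by the application
+float outData[64];
+vmaCopyMemoryToAllocation(allocator, inData, alloc, 0, sizeof(inData));
+vmaCopyAllocationToMemory(allocator, alloc, 0, outData, sizeof(outData));
+\endcode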
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(size) pDstHostPointer, + VkDeviceSize size); + +/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. + +\param allocator +\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits); + +/** \brief Begins defragmentation process. + +\param allocator Allocator object. +\param pInfo Structure filled with parameters of defragmentation. +\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. +\returns +- `VK_SUCCESS` if defragmentation can begin. +- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. + +For more information about defragmentation, see documentation chapter: +[Defragmentation](@ref defragmentation). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, + VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); + +/** \brief Ends defragmentation process. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pStats Optional stats for the defragmentation. Can be null. + +Use this function to finish defragmentation started by vmaBeginDefragmentation(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats* VMA_NULLABLE pStats); + +/** \brief Starts single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pPassInfo Computed information for current pass. +\returns +- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. +- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). 
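+
+A hedged sketch of the full incremental loop (re-creation of buffers/images and the GPU copies are elided):
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+
+VmaDefragmentationPassMoveInfo passInfo;
+while(vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo) == VK_INCOMPLETE)
+{
+    // Recreate resources at passInfo.pMoves[i].dstTmpAllocation and copy their contents here,
+    // or set passInfo.pMoves[i].operation to ignore/destroy specific moves.
+    vmaEndDefragmentationPass(allocator, defragCtx, &passInfo);
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode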
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NOT_NULL context,
+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+
+/** \brief Ends a single defragmentation pass.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
+
+Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentation passes are possible.
+
+Ends the incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
+After this call:
+
+- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
+  (which is the default) will be pointing to the new destination place.
+- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
+  will be freed.
+
+If no more moves are possible, you can end the whole defragmentation.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NOT_NULL context,
+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+
+/** \brief Binds buffer to allocation.
+
+Binds the specified buffer to a region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it, and bind them together separately,
+you should use this function for binding instead of the standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateBuffer() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
+
+/** \brief Binds buffer to allocation with additional parameters.
+
+\param allocator
+\param allocation
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+\param buffer
+\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize allocationLocalOffset,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
+
+/** \brief Binds image to allocation.
+
+Binds the specified image to a region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindImageMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateImage() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + +/** \brief Binds image to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param image +\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindImageMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext); + +/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. + +\param allocator +\param pBufferCreateInfo +\param pAllocationCreateInfo +\param[out] pBuffer Buffer that was created. +\param[out] pAllocation Allocation that was created. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +This function automatically: + +-# Creates buffer. +-# Allocates appropriate memory for it. +-# Binds the buffer with the memory. + +If any of these operations fail, buffer and allocation are not created, +returned value is negative error code, `*pBuffer` and `*pAllocation` are null. + +If the function succeeded, you must destroy both buffer and allocation when you +no longer need them using either convenience function vmaDestroyBuffer() or +separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + +If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, +VK_KHR_dedicated_allocation extension is used internally to query driver whether +it requires or prefers the new buffer to have dedicated allocation. If yes, +and if dedicated allocation is possible +(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated +allocation for this buffer, just like when using +#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + +\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, +although recommended as a good practice, is out of scope of this library and could be implemented +by the user as a higher-level logic on top of VMA. 
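+
+Example (a minimal sketch, assuming `allocator` is a valid #VmaAllocator created by your code):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// Check res...
+
+// ... use the buffer ...
+
+vmaDestroyBuffer(allocator, buf, alloc); // Destroys both the buffer and its memory.
+\endcode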
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Creates a buffer with additional minimum alignment. + +Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, +minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. +for interop with OpenGL. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). + +\note This is a new version of the function augmented with parameter `allocationLocalOffset`. 
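+
+Example (a minimal sketch; `allocator`, `device`, and an existing allocation `alloc` large
+enough to hold the new buffer are hypothetical names from your code):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 4096;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+
+VkBuffer aliasingBuf;
+VkResult res = vmaCreateAliasingBuffer2(allocator, alloc, 0, &bufCreateInfo, &aliasingBuf);
+// Check res...
+
+// ... use aliasingBuf; the memory remains owned by alloc ...
+
+vkDestroyBuffer(device, aliasingBuf, nullptr); // Destroy only the buffer, not the allocation.
+\endcode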
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/// Function similar to vmaCreateBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/// Function similar to vmaCreateAliasingBuffer() but for images.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/// Function similar to vmaCreateAliasingBuffer2() but for images.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \brief Creates new #VmaVirtualBlock object.
+
+\param pCreateInfo Parameters for creation.
+\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+
+/** \brief Destroys #VmaVirtualBlock object.
+
+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+if you are sure this is what you want. If you do neither, an assert is called.
+
+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+don't forget to free them.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+    VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+
+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+(even though the function never allocates any actual GPU memory).
+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+\param virtualBlock Virtual block
+\param pCreateInfo Parameters for the allocation
+\param[out] pAllocation Returned handle of the new allocation
+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+    VkDeviceSize* VMA_NULLABLE pOffset);
+
+/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+
+It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+
+/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+
+You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+before destroying a virtual block. Otherwise, an assert is called.
+
+If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
+don't forget to free it as well.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Changes custom pointer associated with given virtual allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaStatistics* VMA_NOT_NULL pStats);
+
+/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is slow to call. Use for debugging purposes.
+For less detailed statistics, see vmaGetVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaDetailedStatistics* VMA_NOT_NULL pStats);
+
+/** @} */
+
+#if VMA_STATS_STRING_ENABLED
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
+\param virtualBlock Virtual block.
+\param[out] ppStatsString Returned string.
+\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
+
+Returned string must be freed using vmaFreeVirtualBlockStatsString().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+    VkBool32 detailedMap);
+
+/// Frees a string returned by vmaBuildVirtualBlockStatsString().
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    char* VMA_NULLABLE pStatsString);
+
+/** \brief Builds and returns statistics as a null-terminated string in JSON format.
+\param allocator
+\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+\param detailedMap
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+    VkBool32 detailedMap);
+
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE pStatsString);
+
+/** @} */
+
+#endif // VMA_STATS_STRING_ENABLED
+
+#endif // _VMA_FUNCTION_HEADERS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//    IMPLEMENTATION
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cinttypes>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+#include <type_traits>
+
+#if !defined(VMA_CPP20)
+    #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+        #define VMA_CPP20 1
+    #else
+        #define VMA_CPP20 0
+    #endif
+#endif
+
+#ifdef _MSC_VER
+    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+#endif
+#if VMA_CPP20
+    #include <bit>
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+    #include <cstdio> // For snprintf
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need other than default behavior depending on your environment.
+*/
+#ifndef _VMA_CONFIGURATION
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+
+To use this feature in new versions of VMA you now have to pass
+VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
+VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
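+
+For example, the dynamic-functions setup described above can look like this (a sketch;
+`instance`, `physicalDevice` and `device` are handles created by your code):
+
+    VmaVulkanFunctions vulkanFunctions = {};
+    vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+    vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    allocatorCreateInfo.physicalDevice = physicalDevice;
+    allocatorCreateInfo.device = device;
+    allocatorCreateInfo.instance = instance;
+    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocatorCreateInfo, &allocator);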
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+Define this macro to include custom header files without having to edit this file directly, e.g.:
+
+    // Inside of "my_vma_configuration_user_includes.h":
+
+    #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
+    #include "my_custom_min.h" // for my_custom_min
+    #include <algorithm>
+    #include <mutex>
+
+    // Inside a different file, which includes "vk_mem_alloc.h":
+
+    #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
+    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
+    #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
+    #include "vk_mem_alloc.h"
+    ...
+
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+    #include <cassert> // for assert
+    #include <algorithm> // for min, max, swap
+    #include <mutex>
+#else
+    #include VMA_CONFIGURATION_USER_INCLUDES_H
+#endif
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+#ifndef VMA_FALLTHROUGH
+    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+        #define VMA_FALLTHROUGH [[fallthrough]]
+    #else
+        #define VMA_FALLTHROUGH
+    #endif
+#endif
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+// Assert used for reporting memory leaks - unfreed allocations.
+#ifndef VMA_ASSERT_LEAK
+    #define VMA_ASSERT_LEAK(expr) VMA_ASSERT(expr)
+#endif
+
+// If your compiler is not compatible with C++17 and definition of
+// aligned_alloc() function is missing, uncommenting following line may help:
+
+//#include <malloc.h>
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h>
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib>
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
+    // Therefore, for now disable this specific exception until a proper solution is found.
+ //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) + //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 + // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only + // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds + // // MAC_OS_X_VERSION_10_16), even though the function is marked + // // available for 10.15. That is why the preprocessor checks for 10.16 but + // // the __builtin_available checks for 10.15. + // // People who use C++17 could call aligned_alloc with the 10.15 SDK already. + // if (__builtin_available(macOS 10.15, iOS 13, *)) + // return aligned_alloc(alignment, size); + //#endif + //#endif + + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + void *pointer; + if(posix_memalign(&pointer, alignment, size) == 0) + return pointer; + return VMA_NULL; +} +#elif defined(_WIN32) +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return _aligned_malloc(size, alignment); +} +#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return aligned_alloc(alignment, size); +} +#else +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); + return VMA_NULL; +} +#endif + +#if defined(_WIN32) +static void vma_aligned_free(void* ptr) +{ + _aligned_free(ptr); +} +#else +static void vma_aligned_free(void* VMA_NULLABLE ptr) +{ + free(ptr); +} +#endif + +#ifndef VMA_ALIGN_OF + #define VMA_ALIGN_OF(type) (alignof(type)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_MALLOC + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_FREE + // VMA_SYSTEM_FREE is the old name, but might have been defined by the user + #if defined(VMA_SYSTEM_FREE) + #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) + #else + #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) + #endif +#endif + +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + +#ifndef VMA_MIN + #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#endif + +#ifndef VMA_MAX + #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#endif + +#ifndef VMA_SORT + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#endif + +#ifndef VMA_DEBUG_LOG_FORMAT + #define VMA_DEBUG_LOG_FORMAT(format, ...) + /* + #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +#ifndef VMA_DEBUG_LOG + #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) +#endif + +#ifndef VMA_LEAK_LOG_FORMAT + #define VMA_LEAK_LOG_FORMAT(format, ...) 
VMA_DEBUG_LOG_FORMAT(format, __VA_ARGS__)
+#endif
+
+#ifndef VMA_CLASS_NO_COPY
+    #define VMA_CLASS_NO_COPY(className) \
+        private: \
+            className(const className&) = delete; \
+            className& operator=(const className&) = delete;
+#endif
+#ifndef VMA_CLASS_NO_COPY_NO_MOVE
+    #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
+        private: \
+            className(const className&) = delete; \
+            className(className&&) = delete; \
+            className& operator=(const className&) = delete; \
+            className& operator=(className&&) = delete;
+#endif
+
+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+#if VMA_STATS_STRING_ENABLED
+    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+    {
+        snprintf(outStr, strLen, "%" PRIu32, num);
+    }
+    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+    {
+        snprintf(outStr, strLen, "%" PRIu64, num);
+    }
+    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+    {
+        snprintf(outStr, strLen, "%p", ptr);
+    }
+#endif
+
+#ifndef VMA_MUTEX
+    class VmaMutex
+    {
+        VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
+    public:
+        VmaMutex() { }
+        void Lock() { m_Mutex.lock(); }
+        void Unlock() { m_Mutex.unlock(); }
+        bool TryLock() { return m_Mutex.try_lock(); }
+    private:
+        std::mutex m_Mutex;
+    };
+    #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+    #if VMA_USE_STL_SHARED_MUTEX
+        // Use std::shared_mutex from C++17.
+        #include <shared_mutex>
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.lock_shared(); }
+            void UnlockRead() { m_Mutex.unlock_shared(); }
+            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+            void LockWrite() { m_Mutex.lock(); }
+            void UnlockWrite() { m_Mutex.unlock(); }
+            bool TryLockWrite() { return m_Mutex.try_lock(); }
+        private:
+            std::shared_mutex m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+        // Use SRWLOCK from WinAPI.
+        // Minimum supported client = Windows Vista, server = Windows Server 2008.
+        class VmaRWMutex
+        {
+        public:
+            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+            void LockRead() { AcquireSRWLockShared(&m_Lock); }
+            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+        private:
+            SRWLOCK m_Lock;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #else
+        // Less efficient fallback: Use normal mutex.
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.Lock(); }
+            void UnlockRead() { m_Mutex.Unlock(); }
+            bool TryLockRead() { return m_Mutex.TryLock(); }
+            void LockWrite() { m_Mutex.Lock(); }
+            void UnlockWrite() { m_Mutex.Unlock(); }
+            bool TryLockWrite() { return m_Mutex.TryLock(); }
+        private:
+            VMA_MUTEX m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic.
+*/
+#ifndef VMA_ATOMIC_UINT32
+    #include <atomic>
+    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_ATOMIC_UINT64
+    #include <atomic>
+    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+    /**
+    Every allocation will have its own memory block.
+ Define to 1 for debugging purposes only. + */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) +#endif + +#ifndef VMA_MIN_ALIGNMENT + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes. Must be power of two. + */ + #ifdef VMA_DEBUG_ALIGNMENT // Old name + #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT + #else + #define VMA_MIN_ALIGNMENT (1) + #endif +#endif + +#ifndef VMA_DEBUG_MARGIN + /** + Minimum margin after every allocation, in bytes. + Set nonzero for debugging purposes only. + */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount + and return error instead of leaving up to Vulkan implementation what to do in such cases. + */ + #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +#endif + +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDoc. +*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +/******************************************************************************* +END OF CONFIGURATION +*/ +#endif // _VMA_CONFIGURATION + + +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. 
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
+// This one is tricky. Vulkan specification defines this code as available since
+// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
+// See pull request #207.
+#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
+
+
+#if VMA_STATS_STRING_ENABLED
+// Correspond to values of enum VmaSuballocationType.
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
+{
+    "FREE",
+    "UNKNOWN",
+    "BUFFER",
+    "IMAGE_UNKNOWN",
+    "IMAGE_LINEAR",
+    "IMAGE_OPTIMAL",
+};
+#endif
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+    { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
+
+#ifndef _VMA_ENUM_DECLARATIONS
+
+enum VmaSuballocationType
+{
+    VMA_SUBALLOCATION_TYPE_FREE = 0,
+    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+enum VMA_CACHE_OPERATION
+{
+    VMA_CACHE_FLUSH,
+    VMA_CACHE_INVALIDATE
+};
+
+enum class VmaAllocationRequestType
+{
+    Normal,
+    TLSF,
+    // Used by "Linear" algorithm.
+    UpperAddress,
+    EndOf1st,
+    EndOf2nd,
+};
+
+#endif // _VMA_ENUM_DECLARATIONS
+
+#ifndef _VMA_FORWARD_DECLARATIONS
+// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
+
+struct VmaMutexLock;
+struct VmaMutexLockRead;
+struct VmaMutexLockWrite;
+
+template<typename AtomicT>
+struct AtomicTransactionalIncrement;
+
+template<typename T>
+struct VmaStlAllocator;
+
+template<typename T, typename AllocatorT>
+class VmaVector;
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector;
+
+template<typename T>
+class VmaPoolAllocator;
+
+template<typename T>
+struct VmaListItem;
+
+template<typename T>
+class VmaRawList;
+
+template<typename T, typename AllocatorT>
+class VmaList;
+
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList;
+
+#if VMA_STATS_STRING_ENABLED
+class VmaStringBuilder;
+class VmaJsonWriter;
+#endif
+
+class VmaDeviceMemoryBlock;
+
+struct VmaDedicatedAllocationListItemTraits;
+class VmaDedicatedAllocationList;
+
+struct VmaSuballocation;
+struct VmaSuballocationOffsetLess;
+struct VmaSuballocationOffsetGreater;
+struct VmaSuballocationItemSizeLess;
+
+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
+
+struct VmaAllocationRequest;
+
+class VmaBlockMetadata;
+class VmaBlockMetadata_Linear;
+class VmaBlockMetadata_TLSF;
+
+class VmaBlockVector;
+
+struct VmaPoolListItemTraits;
+
+struct VmaCurrentBudgetData;
+
+class VmaAllocationObjectAllocator;
+
+#endif // _VMA_FORWARD_DECLARATIONS
+
+
+#ifndef _VMA_FUNCTIONS
+
+/*
+Returns number of bits set to 1 in (v).
+
+On specific platforms and compilers you can use intrinsics like:
+
+Visual Studio:
+    return __popcnt(v);
+GCC, Clang:
+    return static_cast<uint32_t>(__builtin_popcount(v));
+
+Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
+But you need to check in runtime whether user's CPU supports these, as some old processors don't.
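+
+For example, to plug in the GCC/Clang intrinsic (a sketch, assuming you already know the
+popcnt instruction is supported on your target CPUs; define it before including this file):
+
+    #define VMA_COUNT_BITS_SET(v) static_cast<uint32_t>(__builtin_popcount(v))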
+*/
+static inline uint32_t VmaCountBitsSet(uint32_t v)
+{
+#if VMA_CPP20
+    return std::popcount(v);
+#else
+    uint32_t c = v - ((v >> 1) & 0x55555555);
+    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+    c = ((c >> 4) + c) & 0x0F0F0F0F;
+    c = ((c >> 8) + c) & 0x00FF00FF;
+    c = ((c >> 16) + c) & 0x0000FFFF;
+    return c;
+#endif
+}
+
+static inline uint8_t VmaBitScanLSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+    unsigned long pos;
+    if (_BitScanForward64(&pos, mask))
+        return static_cast<uint8_t>(pos);
+    return UINT8_MAX;
+#elif VMA_CPP20
+    if(mask)
+        return static_cast<uint8_t>(std::countr_zero(mask));
+    return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+    return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
+#else
+    uint8_t pos = 0;
+    uint64_t bit = 1;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit <<= 1;
+    } while (pos++ < 63);
+    return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanLSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+    unsigned long pos;
+    if (_BitScanForward(&pos, mask))
+        return static_cast<uint8_t>(pos);
+    return UINT8_MAX;
+#elif VMA_CPP20
+    if(mask)
+        return static_cast<uint8_t>(std::countr_zero(mask));
+    return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+    return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
+#else
+    uint8_t pos = 0;
+    uint32_t bit = 1;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit <<= 1;
+    } while (pos++ < 31);
+    return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanMSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+    unsigned long pos;
+    if (_BitScanReverse64(&pos, mask))
+        return static_cast<uint8_t>(pos);
+#elif VMA_CPP20
+    if(mask)
+        return 63 - static_cast<uint8_t>(std::countl_zero(mask));
+#elif defined __GNUC__ || defined __clang__
+    if (mask)
+        return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
+#else
+    uint8_t pos = 63;
+    uint64_t bit = 1ULL << 63;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit >>= 1;
+    } while (pos-- > 0);
+#endif
+    return UINT8_MAX;
+}
+
+static inline uint8_t VmaBitScanMSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+    unsigned long pos;
+    if (_BitScanReverse(&pos, mask))
+        return static_cast<uint8_t>(pos);
+#elif VMA_CPP20
+    if(mask)
+        return 31 - static_cast<uint8_t>(std::countl_zero(mask));
+#elif defined __GNUC__ || defined __clang__
+    if (mask)
+        return 31 - static_cast<uint8_t>(__builtin_clz(mask));
+#else
+    uint8_t pos = 31;
+    uint32_t bit = 1UL << 31;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit >>= 1;
+    } while (pos-- > 0);
+#endif
+    return UINT8_MAX;
+}
+
+/*
+Returns true if given number is a power of two.
+T must be unsigned integer number or signed integer but always nonnegative.
+For 0 returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+    return (x & (x - 1)) == 0;
+}
+
+// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return (val + alignment - 1) & ~(alignment - 1);
+}
+
+// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return val & ~(alignment - 1);
+}
+
+// Division with mathematical rounding to nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y)
+{
+    return (x + (y / (T)2)) / y;
+}
+
+// Divide by 'y' and round up to nearest integer.
+template <typename T>
+static inline T VmaDivideRoundingUp(T x, T y)
+{
+    return (x + y - (T)1) / y;
+}
+
+// Returns smallest power of 2 greater or equal to v.
+static inline uint32_t VmaNextPow2(uint32_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v++;
+    return v;
+}
+
+static inline uint64_t VmaNextPow2(uint64_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v++;
+    return v;
+}
+
+// Returns largest power of 2 less or equal to v.
+static inline uint32_t VmaPrevPow2(uint32_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v = v ^ (v >> 1);
+    return v;
+}
+
+static inline uint64_t VmaPrevPow2(uint64_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v = v ^ (v >> 1);
+    return v;
+}
+
+static inline bool VmaStrIsEmpty(const char* pStr)
+{
+    return pStr == VMA_NULL || *pStr == '\0';
+}
+
+/*
+Returns true if two memory blocks occupy overlapping pages.
+ResourceA must be at a lower memory offset than ResourceB.
+
+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+*/
+static inline bool VmaBlocksOnSamePage(
+    VkDeviceSize resourceAOffset,
+    VkDeviceSize resourceASize,
+    VkDeviceSize resourceBOffset,
+    VkDeviceSize pageSize)
+{
+    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+    VkDeviceSize resourceBStart = resourceBOffset;
+    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+    return resourceAEndPage == resourceBStartPage;
+}
+
+/*
+Returns true if given suballocation types could conflict and must respect
+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
+or linear image and another one is optimal image. If type is unknown, behave
+conservatively.
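+
+For example, VMA_SUBALLOCATION_TYPE_BUFFER placed next to VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL
+conflicts and must be separated by bufferImageGranularity, while two buffers never conflict.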
+*/
+static inline bool VmaIsBufferImageGranularityConflict(
+    VmaSuballocationType suballocType1,
+    VmaSuballocationType suballocType2)
+{
+    if (suballocType1 > suballocType2)
+    {
+        std::swap(suballocType1, suballocType2);
+    }
+
+    switch (suballocType1)
+    {
+    case VMA_SUBALLOCATION_TYPE_FREE:
+        return false;
+    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
+        return true;
+    case VMA_SUBALLOCATION_TYPE_BUFFER:
+        return
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
+        return
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
+        return
+            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
+        return false;
+    default:
+        VMA_ASSERT(0);
+        return true;
+    }
+}
+
+static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+{
+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
+    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+    for (size_t i = 0; i < numberCount; ++i, ++pDst)
+    {
+        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
+    }
+#else
+    // no-op
+#endif
+}
+
+static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+{
+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
+    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+    for (size_t i = 0; i < numberCount; ++i, ++pSrc)
+    {
+        if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
+        {
+            return false;
+        }
+    }
+#endif
+    return true;
+}
+
+/*
+Fills structure with parameters of an example buffer to be used for transfers
+during GPU memory defragmentation.
+*/
+static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+{
+    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
+    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
+}
+
+
+/*
+Performs binary search and returns iterator to first element that is greater or
+equal to (key), according to comparison (cmp).
+
+Cmp should return true if first argument is less than second argument.
+
+Returned value is the found element, if present in the collection or place where
+new element with value (key) should be inserted.
+*/
+template <typename CmpLess, typename IterT, typename KeyT>
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+{
+    size_t down = 0, up = size_t(end - beg);
+    while (down < up)
+    {
+        const size_t mid = down + (up - down) / 2;  // Overflow-safe midpoint calculation
+        if (cmp(*(beg + mid), key))
+        {
+            down = mid + 1;
+        }
+        else
+        {
+            up = mid;
+        }
+    }
+    return beg + down;
+}
+
+template <typename CmpLess, typename IterT, typename KeyT>
+IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+{
+    IterT it = VmaBinaryFindFirstNotLess(
+        beg, end, value, cmp);
+    if (it == end ||
+        (!cmp(*it, value) && !cmp(value, *it)))
+    {
+        return it;
+    }
+    return end;
+}
+
+/*
+Returns true if all pointers in the array are not-null and unique.
+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+T must be pointer type, e.g. VmaAllocation, VmaPool.
+*/
+template<typename T>
+static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+{
+    for (uint32_t i = 0; i < count; ++i)
+    {
+        const T iPtr = arr[i];
+        if (iPtr == VMA_NULL)
+        {
+            return false;
+        }
+        for (uint32_t j = i + 1; j < count; ++j)
+        {
+            if (iPtr == arr[j])
+            {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+template<typename MainT, typename NewT>
+static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+{
+    newStruct->pNext = mainStruct->pNext;
+    mainStruct->pNext = newStruct;
+}
+// Finds structure with s->sType == sType in mainStruct->pNext chain.
+// Returns pointer to it. If not found, returns null.
+template<typename FindT, typename MainT>
+static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
+{
+    for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
+        s != VMA_NULL; s = s->pNext)
+    {
+        if(s->sType == sType)
+        {
+            return (const FindT*)s;
+        }
+    }
+    return VMA_NULL;
+}
+
+// An abstraction over buffer or image `usage` flags, depending on available extensions.
+struct VmaBufferImageUsage
+{
+#if VMA_KHR_MAINTENANCE5
+    typedef uint64_t BaseType; // VkFlags64
+#else
+    typedef uint32_t BaseType; // VkFlags32
+#endif
+
+    static const VmaBufferImageUsage UNKNOWN;
+
+    BaseType Value;
+
+    VmaBufferImageUsage() { *this = UNKNOWN; }
+    explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { }
+    VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5);
+    explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo);
+
+    bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; }
+    bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; }
+
+    bool Contains(BaseType flag) const { return (Value & flag) != 0; }
+    bool ContainsDeviceAccess() const
+    {
+        // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_IMAGE_TRANSFER*.
+        return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
+    }
+};
+
+const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0);
+
+static void swap(VmaBufferImageUsage& lhs, VmaBufferImageUsage& rhs) noexcept
+{
+    using std::swap;
+    swap(lhs.Value, rhs.Value);
+}
+
+VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo,
+    bool useKhrMaintenance5)
+{
+#if VMA_KHR_MAINTENANCE5
+    if(useKhrMaintenance5)
+    {
+        // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR,
+        // take usage from it and ignore VkBufferCreateInfo::usage, per specification
+        // of the VK_KHR_maintenance5 extension.
+        const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 =
+            VmaPnextChainFind<VkBufferUsageFlags2CreateInfoKHR>(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);
+        if(usageFlags2)
+        {
+            this->Value = usageFlags2->usage;
+            return;
+        }
+    }
+#endif
+
+    this->Value = (BaseType)createInfo.usage;
+}
+
+VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo)
+{
+    // Maybe in the future there will be VK_KHR_maintenanceN extension with structure
+    // VkImageUsageFlags2CreateInfoKHR, like the one for buffers...
+
+    this->Value = (BaseType)createInfo.usage;
+}
+
+// This is the main algorithm that guides the selection of a memory type best for an allocation -
+// converts usage to required/preferred/not preferred flags.
+static bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaBufferImageUsage bufImgUsage, + VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == VmaBufferImageUsage::UNKNOWN) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known." + " Maybe you use VkBufferUsageFlags2CreateInfoKHR but forgot to use VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT?" ); + return false; + } + + const bool deviceAccess = bufImgUsage.ContainsDeviceAccess(); + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. + if(hostAccessRandom) + { + // Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)! + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. + // Omitting HOST_VISIBLE here is intentional. + // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. + // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + else + { + // Always CPU memory. 
+                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+            }
+        }
+        // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
+        else if(hostAccessSequentialWrite)
+        {
+            // Want uncached and write-combined.
+            outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
+            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+            {
+                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+            }
+            else
+            {
+                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+                // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
+                if(deviceAccess)
+                {
+                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
+                    if(preferHost)
+                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                    else
+                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                }
+                // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
+                else
+                {
+                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
+                    if(preferDevice)
+                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                    else
+                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                }
+            }
+        }
+        // No CPU access
+        else
+        {
+            // if(deviceAccess)
+            //
+            // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
+            // unless there is a clear preference from the user not to do so.
+            //
+            // else:
+            //
+            // No direct GPU access, no CPU access, just transfers.
+            // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
+            // a "swap file" copy to free some GPU memory (then better CPU memory).
+            // Up to the user to decide. If no preference, assume the former and choose GPU memory.
+
+            if(preferHost)
+                outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            else
+                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+        }
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+
+    // Avoid DEVICE_COHERENT unless explicitly requested.
+    if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
+        (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+    {
+        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
+    }
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+{
+    void* result = VMA_NULL;
+    if ((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+    {
+        result = (*pAllocationCallbacks->pfnAllocation)(
+            pAllocationCallbacks->pUserData,
+            size,
+            alignment,
+            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+    }
+    else
+    {
+        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+    }
+    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+    return result;
+}
+
+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+{
+    if ((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnFree != VMA_NULL))
+    {
+        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+    }
+    else
+    {
+        VMA_SYSTEM_ALIGNED_FREE(ptr);
+    }
+}
+
+template<typename T>
+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
+
+template<typename T>
+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+{
+    ptr->~T();
+    VmaFree(pAllocationCallbacks, ptr);
+}
+
+template<typename T>
+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+{
+    if (ptr != VMA_NULL)
+    {
+        for (size_t i = count; i--; )
+        {
+            ptr[i].~T();
+        }
+        VmaFree(pAllocationCallbacks, ptr);
+    }
+}
+
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+{
+    if (srcStr != VMA_NULL)
+    {
+        const size_t len = strlen(srcStr);
+        char* const result = vma_new_array(allocs, char, len + 1);
+        memcpy(result, srcStr, len + 1);
+        return result;
+    }
+    return VMA_NULL;
+}
+
+#if VMA_STATS_STRING_ENABLED
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+{
+    if (srcStr != VMA_NULL)
+    {
+        char* const result = vma_new_array(allocs, char, strLen + 1);
+        memcpy(result, srcStr, strLen);
+        result[strLen] = '\0';
+        return result;
+    }
+    return VMA_NULL;
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+{
+    if (str != VMA_NULL)
+    {
+        const size_t len = strlen(str);
+        vma_delete_array(allocs, str, len + 1);
+    }
+}
+
+template<typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+        vector.data(),
+        vector.data() + vector.size(),
+        value,
+        CmpLess()) - vector.data();
+    VmaVectorInsert(vector, indexToInsert, value);
+    return indexToInsert;
+}
+
+template<typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    CmpLess comparator;
+    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+
vector.begin(), + vector.end(), + value, + comparator); + if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} +#endif // _VMA_FUNCTIONS + +#ifndef _VMA_STATISTICS_FUNCTIONS + +static void VmaClearStatistics(VmaStatistics& outStats) +{ + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; +} + +static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +{ + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; +} + +static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +{ + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; +} + +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); +} + +static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +{ + VmaAddStatistics(inoutStats.statistics, src.statistics); + inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); +} + +#endif // _VMA_STATISTICS_FUNCTIONS + +#ifndef _VMA_MUTEX_LOCK +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->Lock(); } + } + ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } + +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockRead(); } + } + ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. 
+struct VmaMutexLockWrite
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
+public:
+    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
+        : m_pMutex(useMutex ? &mutex : VMA_NULL)
+    {
+        if (m_pMutex) { m_pMutex->LockWrite(); }
+    }
+    ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
+
+private:
+    VMA_RW_MUTEX* m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+    static VMA_MUTEX gDebugGlobalMutex;
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+#endif // _VMA_MUTEX_LOCK
+
+#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
+template<typename AtomicT>
+struct AtomicTransactionalIncrement
+{
+public:
+    using T = decltype(AtomicT().load());
+
+    ~AtomicTransactionalIncrement()
+    {
+        if(m_Atomic)
+            --(*m_Atomic);
+    }
+
+    void Commit() { m_Atomic = VMA_NULL; }
+    T Increment(AtomicT* atomic)
+    {
+        m_Atomic = atomic;
+        return m_Atomic->fetch_add(1);
+    }
+
+private:
+    AtomicT* m_Atomic = VMA_NULL;
+};
+#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+
+#ifndef _VMA_STL_ALLOCATOR
+// STL-compatible allocator.
+template<typename T>
+struct VmaStlAllocator
+{
+    const VkAllocationCallbacks* const m_pCallbacks;
+    typedef T value_type;
+
+    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
+    template<typename U>
+    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
+    VmaStlAllocator(const VmaStlAllocator&) = default;
+    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
+
+    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+    template<typename U>
+    bool operator==(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks == rhs.m_pCallbacks;
+    }
+    template<typename U>
+    bool operator!=(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks != rhs.m_pCallbacks;
+    }
+};
+#endif // _VMA_STL_ALLOCATOR
+
+#ifndef _VMA_VECTOR
+/* Class with interface compatible with subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template<typename T, typename AllocatorT>
+class VmaVector
+{
+public:
+    typedef T value_type;
+    typedef T* iterator;
+    typedef const T* const_iterator;
+
+    VmaVector(const AllocatorT& allocator);
+    VmaVector(size_t count, const AllocatorT& allocator);
+    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+    // value is unused.
+    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
+    VmaVector(const VmaVector& src);
+    VmaVector& operator=(const VmaVector& rhs);
+    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
+
+    bool empty() const { return m_Count == 0; }
+    size_t size() const { return m_Count; }
+    T* data() { return m_pArray; }
+    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+    const T* data() const { return m_pArray; }
+    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+
+    iterator begin() { return m_pArray; }
+    iterator end() { return m_pArray + m_Count; }
+    const_iterator cbegin() const { return m_pArray; }
+    const_iterator cend() const { return m_pArray + m_Count; }
+    const_iterator begin() const { return cbegin(); }
+    const_iterator end() const { return cend(); }
+
+    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
+    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
+    void push_front(const T& src) { insert(0, src); }
+
+    void push_back(const T& src);
+    void reserve(size_t newCapacity, bool freeMemory = false);
+    void resize(size_t newCount);
+    void clear() { resize(0); }
+    void shrink_to_fit();
+    void insert(size_t index, const T& src);
+    void remove(size_t index);
+
+    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+
+private:
+    AllocatorT m_Allocator;
+    T* m_pArray;
+    size_t m_Count;
+    size_t m_Capacity;
+};
+
+#ifndef _VMA_VECTOR_FUNCTIONS
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
+    : m_Allocator(allocator),
+    m_pArray(VMA_NULL),
+    m_Count(0),
+    m_Capacity(0) {}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
+    : m_Allocator(allocator),
+    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+    m_Count(count),
+    m_Capacity(count) {}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
+    : m_Allocator(src.m_Allocator),
+    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+    m_Count(src.m_Count),
+    m_Capacity(src.m_Count)
+{
+    if (m_Count != 0)
+    {
+        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+    }
+}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
+{
+    if (&rhs != this)
+    {
+        resize(rhs.m_Count);
+        if (m_Count != 0)
+        {
+            memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+        }
+    }
+    return *this;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::push_back(const T& src)
+{
+    const size_t newIndex = size();
+    resize(newIndex + 1);
+    m_pArray[newIndex] = src;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
+{
+    newCapacity = VMA_MAX(newCapacity, m_Count);
+
+    if ((newCapacity < m_Capacity) && !freeMemory)
+    {
+        newCapacity = m_Capacity;
+    }
+
+    if (newCapacity != m_Capacity)
+    {
+        T* const newArray = newCapacity ?
+            VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+        if (m_Count != 0)
+        {
+            memcpy(newArray, m_pArray, m_Count * sizeof(T));
+        }
+        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+        m_Capacity = newCapacity;
+        m_pArray = newArray;
+    }
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::resize(size_t newCount)
+{
+    size_t newCapacity = m_Capacity;
+    if (newCount > m_Capacity)
+    {
+        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+    }
+
+    if (newCapacity != m_Capacity)
+    {
+        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+        if (elementsToCopy != 0)
+        {
+            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+        }
+        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+        m_Capacity = newCapacity;
+        m_pArray = newArray;
+    }
+
+    m_Count = newCount;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::shrink_to_fit()
+{
+    if (m_Capacity > m_Count)
+    {
+        T* newArray = VMA_NULL;
+        if (m_Count > 0)
+        {
+            newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
+            memcpy(newArray, m_pArray, m_Count * sizeof(T));
+        }
+        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+        m_Capacity = m_Count;
+        m_pArray = newArray;
+    }
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
+{
+    VMA_HEAVY_ASSERT(index <= m_Count);
+    const size_t oldCount = size();
+    resize(oldCount + 1);
+    if (index < oldCount)
+    {
+        memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+    }
+    m_pArray[index] = src;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::remove(size_t index)
+{
+    VMA_HEAVY_ASSERT(index < m_Count);
+    const size_t oldCount = size();
+    if (index < oldCount - 1)
+    {
+        memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+    }
+    resize(oldCount - 1);
+}
+#endif // _VMA_VECTOR_FUNCTIONS
+
+template<typename T, typename AllocatorT>
+static void VmaVectorInsert(VmaVector<T, AllocatorT>& vec, size_t index, const T& item)
+{
+    vec.insert(index, item);
+}
+
+template<typename T, typename AllocatorT>
+static void VmaVectorRemove(VmaVector<T, AllocatorT>& vec, size_t index)
+{
+    vec.remove(index);
+}
+#endif // _VMA_VECTOR
+
+#ifndef _VMA_SMALL_VECTOR
+/*
+This is a vector (a variable-sized array), optimized for the case when the array is small.
+
+It contains some number of elements in-place, which allows it to avoid heap allocation
+when the actual number of elements is below that threshold. This allows normal "small"
+cases to be fast without losing generality for large inputs.
+*/
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector
+{
+public:
+    typedef T value_type;
+    typedef T* iterator;
+
+    VmaSmallVector(const AllocatorT& allocator);
+    VmaSmallVector(size_t count, const AllocatorT& allocator);
+    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+    ~VmaSmallVector() = default;
+
+    bool empty() const { return m_Count == 0; }
+    size_t size() const { return m_Count; }
+    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
+    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
+    const T* data() const { return m_Count > N ?
m_DynamicArray.data() : m_StaticArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void resize(size_t newCount, bool freeMemory = false); + void clear(bool freeMemory = false); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +#ifndef _VMA_SMALL_VECTOR_FUNCTIONS +template +VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) + : m_Count(0), + m_DynamicArray(allocator) {} + +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) + : m_Count(count), + m_DynamicArray(count > N ? count : 0, allocator) {} + +template +void VmaSmallVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; +} + +template +void VmaSmallVector::resize(size_t newCount, bool freeMemory) +{ + if (newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else if (newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount); + if (m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if (newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if (newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; +} + +template +void VmaSmallVector::clear(bool freeMemory) +{ + m_DynamicArray.clear(); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + m_Count = 0; +} + +template +void VmaSmallVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if (index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; +} + +template +void VmaSmallVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
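+            // The shift below runs in whichever storage is currently active;
+            // a possible switch back to m_StaticArray is then handled by resize() below.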
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_SMALL_VECTOR_FUNCTIONS +#endif // _VMA_SMALL_VECTOR + +#ifndef _VMA_POOL_ALLOCATOR +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types&&... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector> m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for (size_t i = m_ItemBlocks.size(); i--;) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types&&... args) +{ + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. + if (block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result) T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. + const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = + { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 + }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. + for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} +#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS +#endif // _VMA_POOL_ALLOCATOR + +#ifndef _VMA_RAW_LIST +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. + ~VmaRawList() = default; + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Front() const { return m_pFront; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushFront(); + ItemType* PushBack(); + ItemType* PushFront(const T& value); + ItemType* PushBack(const T& value); + void PopFront(); + void PopBack(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Clear(); + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +#ifndef _VMA_RAW_LIST_FUNCTIONS +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) {} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if (IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if (pNextItem != 
VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::Clear() +{ + if (IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while (pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} +#endif // _VMA_RAW_LIST_FUNCTIONS +#endif // _VMA_RAW_LIST + +#ifndef _VMA_LIST +template +class VmaList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaList) +public: + class reverse_iterator; + class const_iterator; + class const_reverse_iterator; + + class iterator + { + friend class const_iterator; + friend class VmaList; + public: + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + iterator operator++(int) { iterator result = *this; ++*this; return result; } + iterator operator--(int) 
{ iterator result = *this; --*this; return result; } + + iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class reverse_iterator + { + friend class const_reverse_iterator; + friend class VmaList; + public: + reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } + reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } + + reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + reverse_iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_iterator + { + friend class VmaList; + public: + const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } + const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } + + const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + const_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_reverse_iterator + { + friend class VmaList; + public: + const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool 
operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } + const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } + + const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + const_reverse_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } + reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } + const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator rbegin() const { return crbegin(); } + const_reverse_iterator rend() const { return crend(); } + + void push_back(const T& value) { m_RawList.PushBack(value); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + + void clear() { m_RawList.Clear(); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + +private: + VmaRawList m_RawList; +}; + +#ifndef _VMA_LIST_FUNCTIONS +template +typename VmaList::iterator& VmaList::iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Front(); + } + return *this; +} + +template +typename VmaList::const_iterator& VmaList::const_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} +#endif // _VMA_LIST_FUNCTIONS +#endif // _VMA_LIST + +#ifndef _VMA_INTRUSIVE_LINKED_LIST +/* +Expected interface of ItemTypeTraits: +struct MyItemTypeTraits +{ + typedef MyItem ItemType; + static ItemType* GetPrev(const ItemType* item) { 
return item->myPrevPtr; } + static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } + static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } + static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } +}; +*/ +template +class VmaIntrusiveLinkedList +{ +public: + typedef typename ItemTypeTraits::ItemType ItemType; + static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } + static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } + + // Movable, not copyable. + VmaIntrusiveLinkedList() = default; + VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); + VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; + VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); + VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; + ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + ItemType* Front() { return m_Front; } + ItemType* Back() { return m_Back; } + const ItemType* Front() const { return m_Front; } + const ItemType* Back() const { return m_Back; } + + void PushBack(ItemType* item); + void PushFront(ItemType* item); + ItemType* PopBack(); + ItemType* PopFront(); + + // MyItem can be null - it means PushBack. + void InsertBefore(ItemType* existingItem, ItemType* newItem); + // MyItem can be null - it means PushFront. + void InsertAfter(ItemType* existingItem, ItemType* newItem); + void Remove(ItemType* item); + void RemoveAll(); + +private: + ItemType* m_Front = VMA_NULL; + ItemType* m_Back = VMA_NULL; + size_t m_Count = 0; +}; + +#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +template +VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) + : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) +{ + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; +} + +template +VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) +{ + if (&src != this) + { + VMA_HEAVY_ASSERT(IsEmpty()); + m_Front = src.m_Front; + m_Back = src.m_Back; + m_Count = src.m_Count; + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; + } + return *this; +} + +template +void VmaIntrusiveLinkedList::PushBack(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessPrev(item) = m_Back; + ItemTypeTraits::AccessNext(m_Back) = item; + m_Back = item; + ++m_Count; + } +} + +template +void VmaIntrusiveLinkedList::PushFront(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessNext(item) = m_Front; + ItemTypeTraits::AccessPrev(m_Front) = item; + m_Front = item; + ++m_Count; + } +} + +template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const backItem = m_Back; + ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; + } + m_Back = prevItem; + --m_Count; + ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; + ItemTypeTraits::AccessNext(backItem) = VMA_NULL; + return backItem; +} + 
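+// Note: PopBack() (above) and PopFront() (below) return the detached item with both
+// link pointers reset to VMA_NULL, so the caller can re-insert it right away.
+// Being intrusive, the list never owns its items; destroying them is the caller's job.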
+template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const frontItem = m_Front; + ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; + } + m_Front = nextItem; + --m_Count; + ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; + ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; + return frontItem; +} + +template +void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); + ItemTypeTraits::AccessPrev(newItem) = prevItem; + ItemTypeTraits::AccessNext(newItem) = existingItem; + ItemTypeTraits::AccessPrev(existingItem) = newItem; + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Front == existingItem); + m_Front = newItem; + } + ++m_Count; + } + else + PushBack(newItem); +} + +template +void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); + ItemTypeTraits::AccessNext(newItem) = nextItem; + ItemTypeTraits::AccessPrev(newItem) = existingItem; + ItemTypeTraits::AccessNext(existingItem) = newItem; + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Back == existingItem); + m_Back = newItem; + } + ++m_Count; + } + else + return PushFront(newItem); +} + +template +void VmaIntrusiveLinkedList::Remove(ItemType* item) +{ + VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); + if (ItemTypeTraits::GetPrev(item) != VMA_NULL) + { + ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); + } + else + { + VMA_HEAVY_ASSERT(m_Front == item); + m_Front = ItemTypeTraits::GetNext(item); + } + + if (ItemTypeTraits::GetNext(item) != VMA_NULL) + { + ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); + } + else + { + VMA_HEAVY_ASSERT(m_Back == item); + m_Back = ItemTypeTraits::GetPrev(item); + } + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + --m_Count; +} + +template +void VmaIntrusiveLinkedList::RemoveAll() +{ + if (!IsEmpty()) + { + ItemType* item = m_Back; + while (item != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + item = prevItem; + } + m_Front = VMA_NULL; + m_Back = VMA_NULL; + m_Count = 0; + } +} +#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +#endif // _VMA_INTRUSIVE_LINKED_LIST + +#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED +class VmaStringBuilder +{ +public: + VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} + ~VmaStringBuilder() = default; + + size_t GetLength() const { return m_Data.size(); } + const char* GetData() const { return m_Data.data(); } + void AddNewLine() { Add('\n'); } + void Add(char ch) { m_Data.push_back(ch); } + 
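+    // The overloads below append raw text with no escaping or formatting:
+    // AddNumber writes decimal digits, AddPointer a "%p"-style hexadecimal value.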
+    void Add(const char* pStr);
+    void AddNumber(uint32_t num);
+    void AddNumber(uint64_t num);
+    void AddPointer(const void* ptr);
+
+private:
+    VmaVector<char, VmaStlAllocator<char>> m_Data;
+};
+
+#ifndef _VMA_STRING_BUILDER_FUNCTIONS
+void VmaStringBuilder::Add(const char* pStr)
+{
+    const size_t strLen = strlen(pStr);
+    if (strLen > 0)
+    {
+        const size_t oldCount = m_Data.size();
+        m_Data.resize(oldCount + strLen);
+        memcpy(m_Data.data() + oldCount, pStr, strLen);
+    }
+}
+
+void VmaStringBuilder::AddNumber(uint32_t num)
+{
+    char buf[11];
+    buf[10] = '\0';
+    char* p = &buf[10];
+    do
+    {
+        *--p = '0' + (char)(num % 10);
+        num /= 10;
+    } while (num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddNumber(uint64_t num)
+{
+    char buf[21];
+    buf[20] = '\0';
+    char* p = &buf[20];
+    do
+    {
+        *--p = '0' + (char)(num % 10);
+        num /= 10;
+    } while (num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddPointer(const void* ptr)
+{
+    char buf[21];
+    VmaPtrToStr(buf, sizeof(buf), ptr);
+    Add(buf);
+}
+#endif //_VMA_STRING_BUILDER_FUNCTIONS
+#endif // _VMA_STRING_BUILDER
+
+#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
+/*
+Allows to conveniently build a correct JSON document to be written to the
+VmaStringBuilder passed to the constructor.
+*/
+class VmaJsonWriter
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
+public:
+    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
+    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+    ~VmaJsonWriter();
+
+    // Begins object by writing "{".
+    // Inside an object, you must call pairs of WriteString and a value, e.g.:
+    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
+    // Will write: { "A": 1, "B": 2 }
+    void BeginObject(bool singleLine = false);
+    // Ends object by writing "}".
+    void EndObject();
+
+    // Begins array by writing "[".
+    // Inside an array, you can write a sequence of any values.
+    void BeginArray(bool singleLine = false);
+    // Ends array by writing "]".
+    void EndArray();
+
+    // Writes a string value inside "".
+    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
+    void WriteString(const char* pStr);
+
+    // Begins writing a string value.
+    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
+    // WriteString to conveniently build the string content incrementally, made of
+    // parts including numbers.
+    void BeginString(const char* pStr = VMA_NULL);
+    // Posts next part of an open string.
+    void ContinueString(const char* pStr);
+    // Posts next part of an open string. The number is converted to decimal characters.
+    void ContinueString(uint32_t n);
+    void ContinueString(uint64_t n);
+    // Posts next part of an open string. Pointer value is converted to characters
+    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
+    void ContinueString_Pointer(const void* ptr);
+    // Ends writing a string value by writing '"'.
+    void EndString(const char* pStr = VMA_NULL);
+
+    // Writes a number value.
+    void WriteNumber(uint32_t n);
+    void WriteNumber(uint64_t n);
+    // Writes a boolean value - false or true.
+    void WriteBool(bool b);
+    // Writes a null value.
+ void WriteNull(); + +private: + enum COLLECTION_TYPE + { + COLLECTION_TYPE_OBJECT, + COLLECTION_TYPE_ARRAY, + }; + struct StackItem + { + COLLECTION_TYPE type; + uint32_t valueCount; + bool singleLineMode; + }; + + static const char* const INDENT; + + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator > m_Stack; + bool m_InsideString; + + void BeginValue(bool isString); + void WriteIndent(bool oneLess = false); +}; +const char* const VmaJsonWriter::INDENT = " "; + +#ifndef _VMA_JSON_WRITER_FUNCTIONS +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) + : m_SB(sb), + m_Stack(VmaStlAllocator(pAllocationCallbacks)), + m_InsideString(false) {} + +VmaJsonWriter::~VmaJsonWriter() +{ + VMA_ASSERT(!m_InsideString); + VMA_ASSERT(m_Stack.empty()); +} + +void VmaJsonWriter::BeginObject(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('{'); + + StackItem item; + item.type = COLLECTION_TYPE_OBJECT; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndObject() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add('}'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); + m_Stack.pop_back(); +} + +void VmaJsonWriter::BeginArray(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('['); + + StackItem item; + item.type = COLLECTION_TYPE_ARRAY; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndArray() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add(']'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); + m_Stack.pop_back(); +} + +void VmaJsonWriter::WriteString(const char* pStr) +{ + BeginString(pStr); + EndString(); +} + +void VmaJsonWriter::BeginString(const char* pStr) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(true); + m_SB.Add('"'); + m_InsideString = true; + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } +} + +void VmaJsonWriter::ContinueString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + + const size_t strLen = strlen(pStr); + for (size_t i = 0; i < strLen; ++i) + { + char ch = pStr[i]; + if (ch == '\\') + { + m_SB.Add("\\\\"); + } + else if (ch == '"') + { + m_SB.Add("\\\""); + } + else if ((uint8_t)ch >= 32) + { + m_SB.Add(ch); + } + else switch (ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + } + } +} + +void VmaJsonWriter::ContinueString(uint32_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString(uint64_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddPointer(ptr); +} + +void VmaJsonWriter::EndString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } + m_SB.Add('"'); + m_InsideString = false; +} + +void VmaJsonWriter::WriteNumber(uint32_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + 
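+    // BeginValue has already emitted any "," separator and indentation required
+    // at this position, so the raw decimal digits can be appended directly.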
m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if (!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if (currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if (!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if (count > 0 && oneLess) + { + --count; + } + for (size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} +#endif // _VMA_JSON_WRITER_FUNCTIONS + +static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +{ + json.BeginObject(); + + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); + json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); + + if (stat.statistics.allocationCount > 1) + { + json.WriteString("AllocationSizeMin"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("AllocationSizeMax"); + json.WriteNumber(stat.allocationSizeMax); + } + if (stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSizeMin"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("UnusedRangeSizeMax"); + json.WriteNumber(stat.unusedRangeSizeMax); + } + json.EndObject(); +} +#endif // _VMA_JSON_WRITER + +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. + bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. + void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. 
+ bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. + + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + { + ++m_MinorCounter; + } + else if(m_MajorCounter > 0) + { + --m_MajorCounter; + --m_MinorCounter; + } + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: +- Access to m_pMetadata must be externally synchronized. +- Map, Unmap, Bind* are synchronized internally. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + ~VmaDeviceMemoryBlock(); + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc(VmaAllocator hAllocator); + void PostFree(VmaAllocator hAllocator); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. + VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. 
+ */ + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; + uint32_t m_MapCount; + void* m_pMappedData; +}; +#endif // _VMA_DEVICE_MEMORY_BLOCK + +#ifndef _VMA_ALLOCATION_T +struct VmaAllocation_T +{ + friend struct VmaDedicatedAllocationListItemTraits; + + enum FLAGS + { + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + // This struct is allocated using VmaPoolAllocator. + VmaAllocation_T(bool mappingAllowed); + ~VmaAllocation_T(); + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped); + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size); + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } + + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); + VmaAllocHandle GetAllocHandle() const; + VkDeviceSize GetOffset() const; + VmaPool GetParentPool() const; + VkDeviceMemory GetMemory() const; + void* GetMappedData() const; + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage GetBufferImageUsage() const { return m_BufferImageUsage; } + void InitBufferUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo, useKhrMaintenance5); + } + void InitImageUsage(const VkImageCreateInfo &createInfo) + { + VMA_ASSERT(m_BufferImageUsage == VmaBufferImageUsage::UNKNOWN); + m_BufferImageUsage = VmaBufferImageUsage(createInfo); + } + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VmaAllocHandle m_AllocHandle; + }; + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. 
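+        // Intrusive doubly-linked list links, accessed through
+        // VmaDedicatedAllocationListItemTraits (defined below).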
+ VmaAllocation_T* m_Prev; + VmaAllocation_T* m_Next; + }; + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. + DedicatedAllocation m_DedicatedAllocation; + }; + + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + char* m_pName; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS +#if VMA_STATS_STRING_ENABLED + VmaBufferImageUsage m_BufferImageUsage; // 0 if unknown. +#endif +}; +#endif // _VMA_ALLOCATION_T + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS +struct VmaDedicatedAllocationListItemTraits +{ + typedef VmaAllocation_T ItemType; + + static ItemType* GetPrev(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType* GetNext(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } + static ItemType*& AccessPrev(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType*& AccessNext(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } +}; +#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST +/* +Stores linked list of VmaAllocation_T objects. +Thread-safe, synchronized internally. +*/ +class VmaDedicatedAllocationList +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList) +public: + VmaDedicatedAllocationList() {} + ~VmaDedicatedAllocationList(); + + void Init(bool useMutex) { m_UseMutex = useMutex; } + bool Validate(); + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); +#if VMA_STATS_STRING_ENABLED + // Writes JSON array with the list of allocations. 
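+    // (Each allocation is printed as one single-line JSON object filled in by
+    // VmaAllocation_T::PrintParameters.)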
+ void BuildStatsString(VmaJsonWriter& json); +#endif + + bool IsEmpty(); + void Register(VmaAllocation alloc); + void Unregister(VmaAllocation alloc); + +private: + typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; + + bool m_UseMutex = true; + VMA_RW_MUTEX m_Mutex; + DedicatedAllocationLinkedList m_AllocationList; +}; + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS + +VmaDedicatedAllocationList::~VmaDedicatedAllocationList() +{ + VMA_HEAVY_ASSERT(Validate()); + + if (!m_AllocationList.IsEmpty()) + { + VMA_ASSERT_LEAK(false && "Unfreed dedicated allocations found!"); + } +} + +bool VmaDedicatedAllocationList::Validate() +{ + const size_t declaredCount = m_AllocationList.GetCount(); + size_t actualCount = 0; + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + ++actualCount; + } + VMA_VALIDATE(actualCount == declaredCount); + + return true; +} + +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); + } +} + +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + + const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); + inoutStats.blockCount += allocCount; + inoutStats.allocationCount += allocCount; + + for(auto* item = m_AllocationList.Front(); item != VMA_NULL; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + json.BeginArray(); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + json.BeginObject(true); + alloc->PrintParameters(json); + json.EndObject(); + } + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaDedicatedAllocationList::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + return m_AllocationList.IsEmpty(); +} + +void VmaDedicatedAllocationList::Register(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.PushBack(alloc); +} + +void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.Remove(alloc); +} +#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS +#endif // _VMA_DEDICATED_ALLOCATION_LIST + +#ifndef _VMA_SUBALLOCATION +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + void* userData; + VmaSuballocationType type; +}; + +// Comparator for offsets. 
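+// Both comparators below define strict weak orderings on VmaSuballocation::offset,
+// e.g. usable as the CmpLess parameter of VmaVectorInsertSorted/VmaVectorRemoveSorted.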
+struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; + +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +struct VmaSuballocationItemSizeLess +{ + bool operator()(const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + + bool operator()(const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; +#endif // _VMA_SUBALLOCATION + +#ifndef _VMA_ALLOCATION_REQUEST +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. +item points to a FREE suballocation. +*/ +struct VmaAllocationRequest +{ + VmaAllocHandle allocHandle; + VkDeviceSize size; + VmaSuballocationList::iterator item; + void* customData; + uint64_t algorithmData; + VmaAllocationRequestType type; +}; +#endif // _VMA_ALLOCATION_REQUEST + +#ifndef _VMA_BLOCK_METADATA +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata) +public: + // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata() = default; + + virtual void Init(VkDeviceSize size) { m_Size = size; } + bool IsVirtual() const { return m_IsVirtual; } + VkDeviceSize GetSize() const { return m_Size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; + + // Shouldn't modify blockCount. + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. 
Request must already be checked and valid.
+    virtual void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) = 0;
+
+    // Frees suballocation assigned to given memory region.
+    virtual void Free(VmaAllocHandle allocHandle) = 0;
+
+    // Frees all allocations.
+    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
+    virtual void Clear() = 0;
+
+    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
+    virtual void DebugLogAllAllocations() const = 0;
+
+protected:
+    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
+    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+    VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
+
+    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+#if VMA_STATS_STRING_ENABLED
+    // mapRefCount == UINT32_MAX means unspecified.
+    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
+        VkDeviceSize unusedBytes,
+        size_t allocationCount,
+        size_t unusedRangeCount) const;
+    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+        VkDeviceSize offset,
+        VkDeviceSize size) const;
+    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
+#endif
+
+private:
+    VkDeviceSize m_Size;
+    const VkAllocationCallbacks* m_pAllocationCallbacks;
+    const VkDeviceSize m_BufferImageGranularity;
+    const bool m_IsVirtual;
+};
+
+#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
+VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : m_Size(0),
+    m_pAllocationCallbacks(pAllocationCallbacks),
+    m_BufferImageGranularity(bufferImageGranularity),
+    m_IsVirtual(isVirtual) {}
+
+void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
+{
+    if (IsVirtual())
+    {
+        VMA_LEAK_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p", offset, size, userData);
+    }
+    else
+    {
+        VMA_ASSERT(userData != VMA_NULL);
+        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
+
+        userData = allocation->GetUserData();
+        const char* name = allocation->GetName();
+
+#if VMA_STATS_STRING_ENABLED
+        VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %s; Usage: %" PRIu64,
+            offset, size, userData, name ? name : "vma_empty",
+            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
+            (uint64_t)allocation->GetBufferImageUsage().Value);
+#else
+        VMA_LEAK_LOG_FORMAT("UNFREED ALLOCATION; Offset: %" PRIu64 "; Size: %" PRIu64 "; UserData: %p; Name: %s; Type: %u",
+            offset, size, userData, name ? name : "vma_empty",
+            (unsigned)allocation->GetSuballocationType());
+#endif // VMA_STATS_STRING_ENABLED
+    }
+}
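+
+// For reference (illustrative; the exact field set depends on the VMA version),
+// the PrintDetailedMap_* helpers below emit JSON for one block shaped like:
+//
+//     "TotalBytes": 67108864, "UnusedBytes": 65011712,
+//     "Allocations": 2, "UnusedRanges": 1,
+//     "Suballocations": [
+//         { "Offset": 0, ... allocation parameters ... },
+//         { "Offset": 1048576, ... allocation parameters ... },
+//         { "Offset": 2097152, "Type": "FREE", "Size": 65011712 }
+//     ]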
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+    VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
+{
+    json.WriteString("TotalBytes");
+    json.WriteNumber(GetSize());
+
+    json.WriteString("UnusedBytes");
+    json.WriteNumber(unusedBytes);
+
+    json.WriteString("Allocations");
+    json.WriteNumber((uint64_t)allocationCount);
+
+    json.WriteString("UnusedRanges");
+    json.WriteNumber((uint64_t)unusedRangeCount);
+
+    json.WriteString("Suballocations");
+    json.BeginArray();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+    VkDeviceSize offset, VkDeviceSize size, void* userData) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    if (IsVirtual())
+    {
+        json.WriteString("Size");
+        json.WriteNumber(size);
+        if (userData)
+        {
+            json.WriteString("CustomData");
+            json.BeginString();
+            json.ContinueString_Pointer(userData);
+            json.EndString();
+        }
+    }
+    else
+    {
+        ((VmaAllocation)userData)->PrintParameters(json);
+    }
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+    VkDeviceSize offset, VkDeviceSize size) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    json.WriteString("Type");
+    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+
+    json.WriteString("Size");
+    json.WriteNumber(size);
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+{
+    json.EndArray();
+}
+#endif // VMA_STATS_STRING_ENABLED
+#endif // _VMA_BLOCK_METADATA_FUNCTIONS
+#endif // _VMA_BLOCK_METADATA
+
+#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
+// Before deleting an object of this class, remember to call 'Destroy()'.
+class VmaBlockBufferImageGranularity final
+{
+public:
+    struct ValidationContext
+    {
+        const VkAllocationCallbacks* allocCallbacks;
+        uint16_t* pageAllocs;
+    };
+
+    VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
+    ~VmaBlockBufferImageGranularity();
+
+    bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
+
+    void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
+    // Before destroying this object, you must call Destroy() to free its memory.
+    void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
+
+    void RoundupAllocRequest(VmaSuballocationType allocType,
+        VkDeviceSize& inOutAllocSize,
+        VkDeviceSize& inOutAllocAlignment) const;
+
+    bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
+        VkDeviceSize allocSize,
+        VkDeviceSize blockOffset,
+        VkDeviceSize blockSize,
+        VmaSuballocationType allocType) const;
+
+    void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
+    void FreePages(VkDeviceSize offset, VkDeviceSize size);
+    void Clear();
+
+    ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
+        bool isVirtual) const;
+    bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
+    bool FinishValidation(ValidationContext& ctx) const;
+
+private:
+    static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
+
+    struct RegionInfo
+    {
+        uint8_t allocType;
+        uint16_t allocCount;
+    };
+
+    VkDeviceSize m_BufferImageGranularity;
+    uint32_t m_RegionCount;
+    RegionInfo* m_RegionInfo;
+
+    uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
+    uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
+
+    uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
+    void AllocPage(RegionInfo& page, uint8_t allocType);
+};
+
+#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
+VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
+    : m_BufferImageGranularity(bufferImageGranularity),
+    m_RegionCount(0),
+    m_RegionInfo(VMA_NULL) {}
+
+VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
+{
+    VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
+}
+
+void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
+        m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
+        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
+    }
+}
+
+void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    if (m_RegionInfo)
+    {
+        vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
+        m_RegionInfo = VMA_NULL;
+    }
+}
+
+void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
+    VkDeviceSize& inOutAllocSize,
+    VkDeviceSize& inOutAllocAlignment) const
+{
+    if (m_BufferImageGranularity > 1 &&
+        m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
+    {
+        if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+        {
+            inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
+            inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
+        }
+    }
+}
+
+bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
+    VkDeviceSize allocSize,
+    VkDeviceSize blockOffset,
+    VkDeviceSize blockSize,
+    VmaSuballocationType allocType) const
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(inOutAllocOffset);
+        if (m_RegionInfo[startPage].allocCount > 0 &&
+            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
+        {
+            inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
+            if (blockSize < allocSize + inOutAllocOffset - blockOffset)
+                return true;
+            ++startPage;
+        }
+        uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
+        if (endPage != startPage &&
+            m_RegionInfo[endPage].allocCount > 0 &&
+            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(offset);
+        AllocPage(m_RegionInfo[startPage], allocType);
+
+        uint32_t endPage = GetEndPage(offset, size);
+        if (startPage != endPage)
+            AllocPage(m_RegionInfo[endPage], allocType);
+    }
+}
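+
+// Worked example (illustrative): with m_BufferImageGranularity == 1024 (a power
+// of two, so OffsetToPageIndex() is effectively 'offset >> 10'), an allocation
+// at offset 1536 with size 2048 occupies bytes [1536, 3584):
+//     GetStartPage(1536)     -> OffsetToPageIndex(1024) -> page 1
+//     GetEndPage(1536, 2048) -> OffsetToPageIndex(3072) -> page 3
+// Only the two boundary pages are ref-counted; interior pages cannot be shared
+// with a neighboring allocation, so tracking them would be redundant.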
+void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(offset);
+        --m_RegionInfo[startPage].allocCount;
+        if (m_RegionInfo[startPage].allocCount == 0)
+            m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
+        uint32_t endPage = GetEndPage(offset, size);
+        if (startPage != endPage)
+        {
+            --m_RegionInfo[endPage].allocCount;
+            if (m_RegionInfo[endPage].allocCount == 0)
+                m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
+        }
+    }
+}
+
+void VmaBlockBufferImageGranularity::Clear()
+{
+    if (m_RegionInfo)
+        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
+}
+
+VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
+    const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) const
+{
+    ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
+    if (!isVirtual && IsEnabled())
+    {
+        ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
+        memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
+    }
+    return ctx;
+}
+
+bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
+    VkDeviceSize offset, VkDeviceSize size) const
+{
+    if (IsEnabled())
+    {
+        uint32_t start = GetStartPage(offset);
+        ++ctx.pageAllocs[start];
+        VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
+
+        uint32_t end = GetEndPage(offset, size);
+        if (start != end)
+        {
+            ++ctx.pageAllocs[end];
+            VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
+        }
+    }
+    return true;
+}
+
+bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
+{
+    // Check proper page structure.
+    if (IsEnabled())
+    {
+        VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
+
+        for (uint32_t page = 0; page < m_RegionCount; ++page)
+        {
+            VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
+        }
+        vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
+        ctx.pageAllocs = VMA_NULL;
+    }
+    return true;
+}
+
+uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
+{
+    return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
+}
+
+void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
+{
+    // When the current alloc type is free, it can be overridden by the new type.
+    if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
+        page.allocType = allocType;
+
+    ++page.allocCount;
+}
+#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
+#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
+
+#ifndef _VMA_BLOCK_METADATA_LINEAR
+/*
+Allocations and their references in internal data structure look like this:
+
+if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
+
+        0 +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
+
+        0 +-------+
+          | Alloc |  2nd[0]
+          +-------+
+          | Alloc |  2nd[1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  2nd[2nd.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
+
+        0 +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  2nd[2nd.size() - 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  2nd[1]
+          +-------+
+          | Alloc |  2nd[0]
+GetSize() +-------+
+
+*/
+class VmaBlockMetadata_Linear : public VmaBlockMetadata
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
+public:
+    VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
+        VkDeviceSize bufferImageGranularity, bool isVirtual);
+    virtual ~VmaBlockMetadata_Linear() = default;
+
+    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
+    bool IsEmpty() const override { return GetAllocationCount() == 0; }
+    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
+
+    void Init(VkDeviceSize size) override;
+    bool Validate() const override;
+    size_t GetAllocationCount() const override;
+    size_t GetFreeRegionsCount() const override;
+
+    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+    void AddStatistics(VmaStatistics& inoutStats) const override;
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json) const override;
+#endif
+
+    bool CreateAllocationRequest(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        bool upperAddress,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest) override;
+
+    VkResult CheckCorruption(const void* pBlockData) override;
+
+    void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) override;
+
+    void Free(VmaAllocHandle allocHandle) override;
+    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+    VmaAllocHandle GetAllocationListBegin() const override;
+    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
+    void Clear() override;
+    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+    void DebugLogAllAllocations() const override;
+
+private:
+    /*
+    There are two suballocation vectors, used in ping-pong way.
+    The one with index m_1stVectorIndex is called 1st.
+    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
+    2nd can be non-empty only when 1st is not empty.
+    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
+    */
+    typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
+
+    enum SECOND_VECTOR_MODE
+    {
+        SECOND_VECTOR_EMPTY,
+        /*
+        Suballocations in 2nd vector are created later than the ones in 1st, but they
+        all have smaller offset.
+        */
+        SECOND_VECTOR_RING_BUFFER,
+        /*
+        Suballocations in 2nd vector are upper side of double stack.
+        They all have offsets higher than those in 1st vector.
+        Top of this stack means smaller offsets, but higher indices in this vector.
+        */
+        SECOND_VECTOR_DOUBLE_STACK,
+    };
+
+    VkDeviceSize m_SumFreeSize;
+    SuballocationVectorType m_Suballocations0, m_Suballocations1;
+    uint32_t m_1stVectorIndex;
+    SECOND_VECTOR_MODE m_2ndVectorMode;
+    // Number of items in 1st vector with hAllocation = null at the beginning.
+    size_t m_1stNullItemsBeginCount;
+    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
+    size_t m_1stNullItemsMiddleCount;
+    // Number of items in 2nd vector with hAllocation = null.
+    size_t m_2ndNullItemsCount;
+
+    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+
+    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
+    bool ShouldCompact1st() const;
+    void CleanupAfterFree();
+
+    bool CreateAllocationRequest_LowerAddress(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+    bool CreateAllocationRequest_UpperAddress(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+};
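+
+// Example walk-through (illustrative) of the mode transitions for a block used
+// as a ring buffer, with lower-address requests for allocations A, B, C, D:
+//     Alloc A, B, C  -> appended to 1st; m_2ndVectorMode == SECOND_VECTOR_EMPTY
+//     Free A         -> A becomes a null item; m_1stNullItemsBeginCount == 1
+//     Alloc D        -> no room after C, so D wraps to 2nd in front of 1st;
+//                       m_2ndVectorMode becomes SECOND_VECTOR_RING_BUFFER
+// An upper-address request instead pushes onto 2nd as a stack and switches the
+// mode to SECOND_VECTOR_DOUBLE_STACK; Alloc() asserts if the two usages are
+// mixed on the same block.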
+
+#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+    m_SumFreeSize(0),
+    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+    m_1stVectorIndex(0),
+    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+    m_1stNullItemsBeginCount(0),
+    m_1stNullItemsMiddleCount(0),
+    m_2ndNullItemsCount(0) {}
+
+void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+    m_SumFreeSize = size;
+}
+
+bool VmaBlockMetadata_Linear::Validate() const
+{
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+    VMA_VALIDATE(!suballocations1st.empty() ||
+        suballocations2nd.empty() ||
+        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+
+    if (!suballocations1st.empty())
+    {
+        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
+        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
+        // Null item at the end should be just pop_back().
+        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
+    }
+    if (!suballocations2nd.empty())
+    {
+        // Null item at the end should be just pop_back().
+ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + const VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize offset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.userData == VMA_NULL); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + 
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + if (lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. 
+ lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if (lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if (lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? 
suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + if(allocSize > GetSize()) + return false; + + pAllocationRequest->size = allocSize; + return upperAddress ? + CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + VMA_ASSERT(!IsVirtual()); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + + switch (request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(offset + request.size <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. 
+ VMA_ASSERT(!suballocations1st.empty() && + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch (m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; + + if (!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if (firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.userData = VMA_NULL; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the middle of 1st vector. + { + const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + +void VmaBlockMetadata_Linear::Clear() +{ + m_SumFreeSize = GetSize(); + m_Suballocations0.clear(); + m_Suballocations1.clear(); + // Leaving m_1stVectorIndex unchanged - it doesn't matter. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; +} + +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Linear::DebugLogAllAllocations() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); + + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); +} + +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the 1st vector. 
+    {
+        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
+            suballocations1st.begin() + m_1stNullItemsBeginCount,
+            suballocations1st.end(),
+            refSuballoc,
+            VmaSuballocationOffsetLess());
+        if (it != suballocations1st.end())
+        {
+            return const_cast<VmaSuballocation&>(*it);
+        }
+    }
+
+    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+    {
+        // Rest of members stays uninitialized intentionally for better performance.
+        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+        if (it != suballocations2nd.end())
+        {
+            return const_cast<VmaSuballocation&>(*it);
+        }
+    }
+
+    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
+    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
+}
+
+bool VmaBlockMetadata_Linear::ShouldCompact1st() const
+{
+    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+    const size_t suballocCount = AccessSuballocations1st().size();
+    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+}
+
+void VmaBlockMetadata_Linear::CleanupAfterFree()
+{
+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+    if (IsEmpty())
+    {
+        suballocations1st.clear();
+        suballocations2nd.clear();
+        m_1stNullItemsBeginCount = 0;
+        m_1stNullItemsMiddleCount = 0;
+        m_2ndNullItemsCount = 0;
+        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+    }
+    else
+    {
+        const size_t suballoc1stCount = suballocations1st.size();
+        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+
+        // Find more null items at the beginning of 1st vector.
+        while (m_1stNullItemsBeginCount < suballoc1stCount &&
+            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            ++m_1stNullItemsBeginCount;
+            --m_1stNullItemsMiddleCount;
+        }
+
+        // Find more null items at the end of 1st vector.
+        while (m_1stNullItemsMiddleCount > 0 &&
+            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_1stNullItemsMiddleCount;
+            suballocations1st.pop_back();
+        }
+
+        // Find more null items at the end of 2nd vector.
+        while (m_2ndNullItemsCount > 0 &&
+            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_2ndNullItemsCount;
+            suballocations2nd.pop_back();
+        }
+
+        // Find more null items at the beginning of 2nd vector.
+        while (m_2ndNullItemsCount > 0 &&
+            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_2ndNullItemsCount;
+            VmaVectorRemove(suballocations2nd, 0);
+        }
+
+        if (ShouldCompact1st())
+        {
+            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+            size_t srcIndex = m_1stNullItemsBeginCount;
+            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+            {
+                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
+                {
+                    ++srcIndex;
+                }
+                if (dstIndex != srcIndex)
+                {
+                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
+                }
+                ++srcIndex;
+            }
+            suballocations1st.resize(nonNullItemCount);
+            m_1stNullItemsBeginCount = 0;
+            m_1stNullItemsMiddleCount = 0;
+        }
+
+        // 2nd vector became empty.
+        if (suballocations2nd.empty())
+        {
+            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+        }
+
+        // 1st vector became empty.
+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { + suballocations1st.clear(); + m_1stNullItemsBeginCount = 0; + + if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + // Swap 1st with 2nd. Now 2nd is empty. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsMiddleCount = m_2ndNullItemsCount; + while (m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + m_2ndNullItemsCount = 0; + m_1stVectorIndex ^= 1; + } + } + } + + VMA_HEAVY_ASSERT(Validate()); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : blockSize; + + // There is enough free space at the end after alignment. + if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. 
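+            // (bufferImageGranularity is the Vulkan device limit that keeps
+            // linear buffers and optimally tiled images on separate "pages";
+            // VmaBlocksOnSamePage() and VmaIsBufferImageGranularityConflict()
+            // implement that page test for neighboring suballocations.)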
+ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item, customData unused. + pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + size_t index1st = m_1stNullItemsBeginCount; + + // There is enough free space at the end after alignment. + if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || + (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) + { + for (size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
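+            // Note: the returned handle encodes offset + 1 so that an
+            // allocation at offset 0 is never mistaken for VK_NULL_HANDLE;
+            // GetAllocationInfo() subtracts 1 to recover the real offset.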
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; + // pAllocationRequest->item, customData unused. + return true; + } + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if (allocSize > blockSize) + { + return false; + } + VkDeviceSize resultBaseOffset = blockSize - allocSize; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if (allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // Apply debugMargin at the end. + if (debugMargin > 0) + { + if (resultOffset < debugMargin) + { + return false; + } + resultOffset -= debugMargin; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if (endOf1st + debugMargin <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (bufferImageGranularity > 1) + { + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
+        // pAllocationRequest->item unused.
+        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
+        return true;
+    }
+
+    return false;
+}
+#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
+#endif // _VMA_BLOCK_METADATA_LINEAR
+
+#ifndef _VMA_BLOCK_METADATA_TLSF
+// Use VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in
+// CreateAllocationRequest() to avoid searching the current, larger region when
+// the first allocation attempt fails, skipping directly to a smaller range.
+// When fragmentation and reuse of previous blocks do not matter, use
+// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT instead for the fastest possible
+// allocation time.
+class VmaBlockMetadata_TLSF : public VmaBlockMetadata
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
+public:
+    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
+        VkDeviceSize bufferImageGranularity, bool isVirtual);
+    virtual ~VmaBlockMetadata_TLSF();
+
+    size_t GetAllocationCount() const override { return m_AllocCount; }
+    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
+    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
+    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
+    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
+
+    void Init(VkDeviceSize size) override;
+    bool Validate() const override;
+
+    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+    void AddStatistics(VmaStatistics& inoutStats) const override;
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json) const override;
+#endif
+
+    bool CreateAllocationRequest(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        bool upperAddress,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest) override;
+
+    VkResult CheckCorruption(const void* pBlockData) override;
+    void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) override;
+
+    void Free(VmaAllocHandle allocHandle) override;
+    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+    VmaAllocHandle GetAllocationListBegin() const override;
+    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
+    void Clear() override;
+    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+    void DebugLogAllAllocations() const override;
+
+private:
+    // According to the original paper it should preferably be 4 or 5:
+    // M. Masmano, I. Ripoll, A. Crespo, and J. Real, "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
+    // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
+    static const uint8_t SECOND_LEVEL_INDEX = 5;
+    static const uint16_t SMALL_BUFFER_SIZE = 256;
+    static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
+    static const uint8_t MEMORY_CLASS_SHIFT = 7;
+    static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
+
+    class Block
+    {
+    public:
+        VkDeviceSize offset;
+        VkDeviceSize size;
+        Block* prevPhysical;
+        Block* nextPhysical;
+
+        void MarkFree() { prevFree = VMA_NULL; }
+        void MarkTaken() { prevFree = this; }
+        bool IsFree() const { return prevFree != this; }
+        void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
+        Block*& PrevFree() { return prevFree; }
+        Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
+
+    private:
+        Block* prevFree; // Address of the same block here indicates that block is taken
+        union
+        {
+            Block* nextFree;
+            void* userData;
+        };
+    };
+
+    size_t m_AllocCount;
+    // Total number of free blocks besides null block
+    size_t m_BlocksFreeCount;
+    // Total size of free blocks excluding null block
+    VkDeviceSize m_BlocksFreeSize;
+    uint32_t m_IsFreeBitmap;
+    uint8_t m_MemoryClasses;
+    uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
+    uint32_t m_ListsCount;
+    /*
+    * 0: 0-3 lists for small buffers
+    * 1+: 0-(2^SLI-1) lists for normal buffers
+    */
+    Block** m_FreeList;
+    VmaPoolAllocator<Block> m_BlockAllocator;
+    Block* m_NullBlock;
+    VmaBlockBufferImageGranularity m_GranularityHandler;
+
+    uint8_t SizeToMemoryClass(VkDeviceSize size) const;
+    uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
+    uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
+    uint32_t GetListIndex(VkDeviceSize size) const;
+
+    void RemoveFreeBlock(Block* block);
+    void InsertFreeBlock(Block* block);
+    void MergeBlock(Block* block, Block* prev);
+
+    Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
+    bool CheckBlock(
+        Block& block,
+        uint32_t listIndex,
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        VmaAllocationRequest* pAllocationRequest);
+};
+
+#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
+VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+    m_AllocCount(0),
+    m_BlocksFreeCount(0),
+    m_BlocksFreeSize(0),
+    m_IsFreeBitmap(0),
+    m_MemoryClasses(0),
+    m_ListsCount(0),
+    m_FreeList(VMA_NULL),
+    m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
+    m_NullBlock(VMA_NULL),
+    m_GranularityHandler(bufferImageGranularity) {}
+
+VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
+{
+    if (m_FreeList)
+        vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
+    m_GranularityHandler.Destroy(GetAllocationCallbacks());
+}
+
+void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    if (!IsVirtual())
+        m_GranularityHandler.Init(GetAllocationCallbacks(), size);
+
+    m_NullBlock = m_BlockAllocator.Alloc();
+    m_NullBlock->size = size;
+    m_NullBlock->offset = 0;
+    m_NullBlock->prevPhysical = VMA_NULL;
+    m_NullBlock->nextPhysical = VMA_NULL;
+    m_NullBlock->MarkFree();
+    m_NullBlock->NextFree() = VMA_NULL;
+    m_NullBlock->PrevFree() = VMA_NULL;
+    uint8_t memoryClass = SizeToMemoryClass(size);
+    uint16_t sli = SizeToSecondIndex(size, memoryClass);
+    m_ListsCount = (memoryClass == 0 ?
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; + if (IsVirtual()) + m_ListsCount += 1UL << SECOND_LEVEL_INDEX; + else + m_ListsCount += 4; + + m_MemoryClasses = memoryClass + uint8_t(2); + memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + if (m_NullBlock->size > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); + else + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); + } +} + +void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount 
+= (uint32_t)m_AllocCount;
+    inoutStats.blockBytes += GetSize();
+    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
+    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+
+    size_t i = blockCount;
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+    {
+        blockList[--i] = block;
+    }
+    VMA_ASSERT(i == 0);
+
+    VmaDetailedStatistics stats;
+    VmaClearDetailedStatistics(stats);
+    AddDetailedStatistics(stats);
+
+    PrintDetailedMap_Begin(json,
+        stats.statistics.blockBytes - stats.statistics.allocationBytes,
+        stats.statistics.allocationCount,
+        stats.unusedRangeCount);
+
+    for (; i < blockCount; ++i)
+    {
+        Block* block = blockList[i];
+        if (block->IsFree())
+            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
+        else
+            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
+    }
+    if (m_NullBlock->size > 0)
+        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
+
+    PrintDetailedMap_End(json);
+}
+#endif
+
+bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    bool upperAddress,
+    VmaSuballocationType allocType,
+    uint32_t strategy,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
+    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+    // For small granularity round up
+    if (!IsVirtual())
+        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
+
+    allocSize += GetDebugMargin();
+    // Quick check for too small pool
+    if (allocSize > GetSumFreeSize())
+        return false;
+
+    // If no free blocks in pool then check only null block
+    if (m_BlocksFreeCount == 0)
+        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
+
+    // Round up to the next block
+    VkDeviceSize sizeForNextList = allocSize;
+    VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
+    if (allocSize > SMALL_BUFFER_SIZE)
+    {
+        sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
+    }
+    else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
+        sizeForNextList = SMALL_BUFFER_SIZE + 1;
+    else
+        sizeForNextList += smallSizeStep;
+
+    uint32_t nextListIndex = m_ListsCount;
+    uint32_t prevListIndex = m_ListsCount;
+    Block* nextListBlock = VMA_NULL;
+    Block* prevListBlock = VMA_NULL;
+
+    // Check blocks according to strategies
+    if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
+    {
+        // Quick check for larger block first
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // If not fitted then null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Null block failed, search larger bucket
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+
+        // Failed again, check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+    }
+    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
+    {
+        // Check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Check larger bucket
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+    }
+    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)
+    {
+        // Perform search from the start
+        VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+        VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
+
+        size_t i = m_BlocksFreeCount;
+        for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+        {
+            if (block->IsFree() && block->size >= allocSize)
+                blockList[--i] = block;
+        }
+
+        for (; i < m_BlocksFreeCount; ++i)
+        {
+            Block& block = *blockList[i];
+            if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Whole range searched, no more memory
+        return false;
+    }
+    else
+    {
+        // Check larger bucket
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+    }
+
+    // Worst case, full search has to be done
+    while (++nextListIndex < m_ListsCount)
+    {
+        nextListBlock = m_FreeList[nextListIndex];
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+    }
+
+    // No more memory sadly
+    return false;
+}
+
+VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
+{
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+    {
+        if (!block->IsFree())
+        {
+            if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
+            {
+                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+                return VK_ERROR_UNKNOWN_COPY;
+            }
+        }
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaBlockMetadata_TLSF::Alloc(
+    const VmaAllocationRequest& request,
+    VmaSuballocationType type,
+    void* userData)
+{
+    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
+
+    // Get block and pop it from the free list
+    Block* currentBlock = (Block*)request.allocHandle;
+    VkDeviceSize offset = request.algorithmData;
+    VMA_ASSERT(currentBlock != VMA_NULL);
+    VMA_ASSERT(currentBlock->offset <= offset);
+
+    if (currentBlock != m_NullBlock)
+        RemoveFreeBlock(currentBlock);
+
+    VkDeviceSize debugMargin = GetDebugMargin();
+    VkDeviceSize missingAlignment = offset - currentBlock->offset;
+
+    // Append missing alignment to prev block or create new one
+    if (missingAlignment)
+    {
+        Block* prevBlock = currentBlock->prevPhysical;
+        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
+
+        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
+        {
+            uint32_t oldList = GetListIndex(prevBlock->size);
+            prevBlock->size += missingAlignment;
+            // Check if new size crosses list bucket
+            if (oldList != GetListIndex(prevBlock->size))
+            {
+                prevBlock->size -= missingAlignment;
+                RemoveFreeBlock(prevBlock);
+                prevBlock->size += missingAlignment;
+                InsertFreeBlock(prevBlock);
+            }
+            else
+                m_BlocksFreeSize += missingAlignment;
+        }
+        else
+        {
+            Block* newBlock = m_BlockAllocator.Alloc();
+            currentBlock->prevPhysical = newBlock;
+            prevBlock->nextPhysical = newBlock;
+            newBlock->prevPhysical = prevBlock;
+            newBlock->nextPhysical = currentBlock;
+            newBlock->size = missingAlignment;
+            newBlock->offset = currentBlock->offset;
+            newBlock->MarkTaken();
+
+            InsertFreeBlock(newBlock);
+        }
+
+        currentBlock->size -= missingAlignment;
+        currentBlock->offset += missingAlignment;
+    }
+
+    VkDeviceSize size = request.size + debugMargin;
+    if (currentBlock->size == size)
+    {
+        if (currentBlock == m_NullBlock)
+        {
+            // Setup new null block
+            m_NullBlock = m_BlockAllocator.Alloc();
+            m_NullBlock->size = 0;
+            m_NullBlock->offset = currentBlock->offset + size;
+            m_NullBlock->prevPhysical = currentBlock;
+            m_NullBlock->nextPhysical = VMA_NULL;
+            m_NullBlock->MarkFree();
+            m_NullBlock->PrevFree() = VMA_NULL;
+            m_NullBlock->NextFree() = VMA_NULL;
+            currentBlock->nextPhysical = m_NullBlock;
+            currentBlock->MarkTaken();
+        }
+    }
+    else
+    {
+        VMA_ASSERT(currentBlock->size > size && "Proper block already
found, shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - size; + newBlock->offset = currentBlock->offset + size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = size; + + if (currentBlock == m_NullBlock) + { + m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (debugMargin > 0) + { + currentBlock->size -= debugMargin; + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = debugMargin; + newBlock->offset = currentBlock->offset + currentBlock->size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + newBlock->MarkTaken(); + currentBlock->nextPhysical->prevPhysical = newBlock; + currentBlock->nextPhysical = newBlock; + InsertFreeBlock(newBlock); + } + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + VkDeviceSize debugMargin = GetDebugMargin(); + if (debugMargin > 0) + { + RemoveFreeBlock(next); + MergeBlock(next, block); + block = next; + next = next->nextPhysical; + } + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) + return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return 
VK_NULL_HANDLE;
+}
+
+VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
+{
+    Block* block = (Block*)alloc;
+    VMA_ASSERT(!block->IsFree() && "Incorrect block!");
+
+    if (block->prevPhysical)
+        return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
+    return 0;
+}
+
+void VmaBlockMetadata_TLSF::Clear()
+{
+    m_AllocCount = 0;
+    m_BlocksFreeCount = 0;
+    m_BlocksFreeSize = 0;
+    m_IsFreeBitmap = 0;
+    m_NullBlock->offset = 0;
+    m_NullBlock->size = GetSize();
+    Block* block = m_NullBlock->prevPhysical;
+    m_NullBlock->prevPhysical = VMA_NULL;
+    while (block)
+    {
+        Block* prev = block->prevPhysical;
+        m_BlockAllocator.Free(block);
+        block = prev;
+    }
+    memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
+    memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
+    m_GranularityHandler.Clear();
+}
+
+void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+{
+    Block* block = (Block*)allocHandle;
+    VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
+    block->UserData() = userData;
+}
+
+void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
+{
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+        if (!block->IsFree())
+            DebugLogAllocation(block->offset, block->size, block->UserData());
+}
+
+uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
+{
+    if (size > SMALL_BUFFER_SIZE)
+        return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
+    return 0;
+}
+
+uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
+{
+    if (memoryClass == 0)
+    {
+        if (IsVirtual())
+            return static_cast<uint16_t>((size - 1) / 8);
+        else
+            return static_cast<uint16_t>((size - 1) / 64);
+    }
+    return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
+}
+
+uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
+{
+    if (memoryClass == 0)
+        return secondIndex;
+
+    const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
+    if (IsVirtual())
+        return index + (1 << SECOND_LEVEL_INDEX);
+    else
+        return index + 4;
+}
+
+uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
+{
+    uint8_t memoryClass = SizeToMemoryClass(size);
+    return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
+}
+
+void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
+{
+    VMA_ASSERT(block != m_NullBlock);
+    VMA_ASSERT(block->IsFree());
+
+    if (block->NextFree() != VMA_NULL)
+        block->NextFree()->PrevFree() = block->PrevFree();
+    if (block->PrevFree() != VMA_NULL)
+        block->PrevFree()->NextFree() = block->NextFree();
+    else
+    {
+        uint8_t memClass = SizeToMemoryClass(block->size);
+        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+        uint32_t index = GetListIndex(memClass, secondIndex);
+        VMA_ASSERT(m_FreeList[index] == block);
+        m_FreeList[index] = block->NextFree();
+        if (block->NextFree() == VMA_NULL)
+        {
+            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
+            if (m_InnerIsFreeBitmap[memClass] == 0)
+                m_IsFreeBitmap &= ~(1UL << memClass);
+        }
+    }
+    block->MarkTaken();
+    block->UserData() = VMA_NULL;
+    --m_BlocksFreeCount;
+    m_BlocksFreeSize -= block->size;
+}
+
+void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
+{
+    VMA_ASSERT(block != m_NullBlock);
+    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
+
+    uint8_t memClass = SizeToMemoryClass(block->size);
+    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+    uint32_t index = GetListIndex(memClass, secondIndex);
+    VMA_ASSERT(index < m_ListsCount);
+    block->PrevFree() = VMA_NULL;
+    block->NextFree() = m_FreeList[index];
+    m_FreeList[index] = block;
+    if (block->NextFree() != VMA_NULL)
+        block->NextFree()->PrevFree() = block;
+    else
+    {
+        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
+        m_IsFreeBitmap |= 1UL << memClass;
+    }
+    ++m_BlocksFreeCount;
+    m_BlocksFreeSize += block->size;
+}
+
+void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
+{
+    VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
+    VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
+
+    block->offset = prev->offset;
+    block->size += prev->size;
+    block->prevPhysical = prev->prevPhysical;
+    if (block->prevPhysical)
+        block->prevPhysical->nextPhysical = block;
+    m_BlockAllocator.Free(prev);
+}
+
+VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
+{
+    uint8_t memoryClass = SizeToMemoryClass(size);
+    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
+    if (!innerFreeMap)
+    {
+        // Check higher levels for available blocks
+        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
+        if (!freeMap)
+            return VMA_NULL; // No more memory available
+
+        // Find lowest free region
+        memoryClass = VMA_BITSCAN_LSB(freeMap);
+        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
+        VMA_ASSERT(innerFreeMap != 0);
+    }
+    // Find lowest free subregion
+    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
+    VMA_ASSERT(m_FreeList[listIndex]);
+    return m_FreeList[listIndex];
+}
+
+bool VmaBlockMetadata_TLSF::CheckBlock(
+    Block& block,
+    uint32_t listIndex,
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    VmaSuballocationType allocType,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(block.IsFree() && "Block is already taken!");
+
+    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
+    if (block.size < allocSize + alignedOffset - block.offset)
+        return false;
+
+    // Check for granularity conflicts
+    if (!IsVirtual() &&
+        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
+        return false;
+
+    // Alloc successful
+    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
+    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
+    pAllocationRequest->size = allocSize - GetDebugMargin();
+    pAllocationRequest->customData = (void*)allocType;
+    pAllocationRequest->algorithmData = alignedOffset;
+
+    // Place block at the start of list if it's normal block
+    if (listIndex != m_ListsCount && block.PrevFree())
+    {
+        block.PrevFree()->NextFree() = block.NextFree();
+        if (block.NextFree())
+            block.NextFree()->PrevFree() = block.PrevFree();
+        block.PrevFree() = VMA_NULL;
+        block.NextFree() = m_FreeList[listIndex];
+        m_FreeList[listIndex] = &block;
+        if (block.NextFree())
+            block.NextFree()->PrevFree() = &block;
+    }
+
+    return true;
+}
+#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
+#endif // _VMA_BLOCK_METADATA_TLSF
+
+#ifndef _VMA_BLOCK_VECTOR
+/*
+Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
+Vulkan memory type.
+
+Synchronized internally with a mutex.
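+
+Allocate() first tries to satisfy a request from the existing blocks
+(m_Blocks is kept incrementally sorted by free size, ascending); only when no
+existing block can service it, and the maximum block count has not been
+reached, is a new VkDeviceMemory block created.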
+*/
+class VmaBlockVector
+{
+    friend struct VmaDefragmentationContext_T;
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
+public:
+    VmaBlockVector(
+        VmaAllocator hAllocator,
+        VmaPool hParentPool,
+        uint32_t memoryTypeIndex,
+        VkDeviceSize preferredBlockSize,
+        size_t minBlockCount,
+        size_t maxBlockCount,
+        VkDeviceSize bufferImageGranularity,
+        bool explicitBlockSize,
+        uint32_t algorithm,
+        float priority,
+        VkDeviceSize minAllocationAlignment,
+        void* pMemoryAllocateNext);
+    ~VmaBlockVector();
+
+    VmaAllocator GetAllocator() const { return m_hAllocator; }
+    VmaPool GetParentPool() const { return m_hParentPool; }
+    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
+    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+    uint32_t GetAlgorithm() const { return m_Algorithm; }
+    bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
+    float GetPriority() const { return m_Priority; }
+    const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
+    // To be used only while the m_Mutex is locked. Used during defragmentation.
+    size_t GetBlockCount() const { return m_Blocks.size(); }
+    // To be used only while the m_Mutex is locked. Used during defragmentation.
+    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+    VMA_RW_MUTEX& GetMutex() { return m_Mutex; }
+
+    VkResult CreateMinBlocks();
+    void AddStatistics(VmaStatistics& inoutStats);
+    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
+    bool IsEmpty();
+    bool IsCorruptionDetectionEnabled() const;
+
+    VkResult Allocate(
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    void Free(const VmaAllocation hAllocation);
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json);
+#endif
+
+    VkResult CheckCorruption();
+
+private:
+    const VmaAllocator m_hAllocator;
+    const VmaPool m_hParentPool;
+    const uint32_t m_MemoryTypeIndex;
+    const VkDeviceSize m_PreferredBlockSize;
+    const size_t m_MinBlockCount;
+    const size_t m_MaxBlockCount;
+    const VkDeviceSize m_BufferImageGranularity;
+    const bool m_ExplicitBlockSize;
+    const uint32_t m_Algorithm;
+    const float m_Priority;
+    const VkDeviceSize m_MinAllocationAlignment;
+
+    void* const m_pMemoryAllocateNext;
+    VMA_RW_MUTEX m_Mutex;
+    // Incrementally sorted by sumFreeSize, ascending.
+    VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
+    uint32_t m_NextBlockId;
+    bool m_IncrementalSort = true;
+
+    void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
+
+    VkDeviceSize CalcMaxBlockSize() const;
+    // Finds and removes given block from vector.
+    void Remove(VmaDeviceMemoryBlock* pBlock);
+    // Performs single step in sorting m_Blocks. They may not be fully sorted
+    // after this call.
+    void IncrementallySortBlocks();
+    void SortByFreeSize();
+
+    VkResult AllocatePage(
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaSuballocationType suballocType,
+        VmaAllocation* pAllocation);
+
+    VkResult AllocateFromBlock(
+        VmaDeviceMemoryBlock* pBlock,
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        VmaAllocationCreateFlags allocFlags,
+        void* pUserData,
+        VmaSuballocationType suballocType,
+        uint32_t strategy,
+        VmaAllocation* pAllocation);
+
+    VkResult CommitAllocationRequest(
+        VmaAllocationRequest& allocRequest,
+        VmaDeviceMemoryBlock* pBlock,
+        VkDeviceSize alignment,
+        VmaAllocationCreateFlags allocFlags,
+        void* pUserData,
+        VmaSuballocationType suballocType,
+        VmaAllocation* pAllocation);
+
+    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
+    bool HasEmptyBlock();
+};
+#endif // _VMA_BLOCK_VECTOR
+
+#ifndef _VMA_DEFRAGMENTATION_CONTEXT
+struct VmaDefragmentationContext_T
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
+public:
+    VmaDefragmentationContext_T(
+        VmaAllocator hAllocator,
+        const VmaDefragmentationInfo& info);
+    ~VmaDefragmentationContext_T();
+
+    void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
+
+    VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
+    VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
+
+private:
+    // Max number of allocations to ignore due to size constraints before ending single pass
+    static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
+    enum class CounterStatus { Pass, Ignore, End };
+
+    struct FragmentedBlock
+    {
+        uint32_t data;
+        VmaDeviceMemoryBlock* block;
+    };
+    struct StateBalanced
+    {
+        VkDeviceSize avgFreeSize = 0;
+        VkDeviceSize avgAllocSize = UINT64_MAX;
+    };
+    struct StateExtensive
+    {
+        enum class Operation : uint8_t
+        {
+            FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
+            MoveBuffers, MoveTextures, MoveAll,
+            Cleanup, Done
+        };
+
+        Operation operation = Operation::FindFreeBlockTexture;
+        size_t firstFreeBlock = SIZE_MAX;
+    };
+    struct MoveAllocationData
+    {
+        VkDeviceSize size;
+        VkDeviceSize alignment;
+        VmaSuballocationType type;
+        VmaAllocationCreateFlags flags;
+        VmaDefragmentationMove move = {};
+    };
+
+    const VkDeviceSize m_MaxPassBytes;
+    const uint32_t m_MaxPassAllocations;
+    const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
+    void* m_BreakCallbackUserData;
+
+    VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
+    VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
+
+    uint8_t m_IgnoredAllocs = 0;
+    uint32_t m_Algorithm;
+    uint32_t m_BlockVectorCount;
+    VmaBlockVector* m_PoolBlockVector;
+    VmaBlockVector** m_pBlockVectors;
+    size_t m_ImmovableBlockCount = 0;
+    VmaDefragmentationStats m_GlobalStats = { 0 };
+    VmaDefragmentationStats m_PassStats = { 0 };
+    void* m_AlgorithmState = VMA_NULL;
+
+    static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
+    CounterStatus CheckCounters(VkDeviceSize bytes);
+    bool IncrementCounters(VkDeviceSize bytes);
+    bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
+    bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
+
+    bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
+    bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
+    bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
+    bool ComputeDefragmentation_Full(VmaBlockVector& vector);
+    bool ComputeDefragmentation_Extensive(VmaBlockVector& vector,
size_t index); + + void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + bool MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); +}; +#endif // _VMA_DEFRAGMENTATION_CONTEXT + +#ifndef _VMA_POOL_T +struct VmaPool_T +{ + friend struct VmaPoolListItemTraits; + VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; + VmaPool_T* m_PrevPool = VMA_NULL; + VmaPool_T* m_NextPool = VMA_NULL; +}; + +struct VmaPoolListItemTraits +{ + typedef VmaPool_T ItemType; + + static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } + static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } + static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } + static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } +}; +#endif // _VMA_POOL_T + +#ifndef _VMA_CURRENT_BUDGET_DATA +struct VmaCurrentBudgetData +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData) +public: + + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // VMA_MEMORY_BUDGET + + VmaCurrentBudgetData(); + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); +}; + +#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +VmaCurrentBudgetData::VmaCurrentBudgetData() +{ + for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif +} + +void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} + +void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); + m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} +#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +#endif // _VMA_CURRENT_BUDGET_DATA + +#ifndef 
_VMA_ALLOCATION_OBJECT_ALLOCATOR
+/*
+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+*/
+class VmaAllocationObjectAllocator
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
+public:
+    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
+        : m_Allocator(pAllocationCallbacks, 1024) {}
+
+    template<typename... Types> VmaAllocation Allocate(Types&&... args);
+    void Free(VmaAllocation hAlloc);
+
+private:
+    VMA_MUTEX m_Mutex;
+    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+template<typename... Types>
+VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    return m_Allocator.Alloc(std::forward<Types>(args)...);
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    m_Allocator.Free(hAlloc);
+}
+#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
+
+#ifndef _VMA_VIRTUAL_BLOCK_T
+struct VmaVirtualBlock_T
+{
+    VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
+public:
+    const bool m_AllocationCallbacksSpecified;
+    const VkAllocationCallbacks m_AllocationCallbacks;
+
+    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
+    ~VmaVirtualBlock_T();
+
+    VkResult Init() { return VK_SUCCESS; }
+    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
+    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
+    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
+    void Clear() { m_Metadata->Clear(); }
+
+    const VkAllocationCallbacks* GetAllocationCallbacks() const;
+    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
+    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
+        VkDeviceSize* outOffset);
+    void GetStatistics(VmaStatistics& outStats) const;
+    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
+#if VMA_STATS_STRING_ENABLED
+    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
+#endif
+
+private:
+    VmaBlockMetadata* m_Metadata;
+};
+
+#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
+VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
+    : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
+    m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
+{
+    const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
+    switch (algorithm)
+    {
+    case 0:
+        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+        break;
+    case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
+        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
+        break;
+    default:
+        VMA_ASSERT(0);
+        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+    }
+
+    m_Metadata->Init(createInfo.size);
+}
+
+VmaVirtualBlock_T::~VmaVirtualBlock_T()
+{
+    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
+    // to receive the list of the unfreed allocations.
+    if (!m_Metadata->IsEmpty())
+        m_Metadata->DebugLogAllAllocations();
+    // This is the most important assert in the entire library.
+    // Hitting it means you have some memory leak - unreleased virtual allocations.
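+    // For reference, the pairing expected at the public API level is sketched
+    // below (illustrative only; these entry points are declared in the
+    // interface section of this header):
+    //
+    //     VmaVirtualBlockCreateInfo blockInfo = {};
+    //     blockInfo.size = 1048576;
+    //     VmaVirtualBlock block;
+    //     vmaCreateVirtualBlock(&blockInfo, &block);
+    //
+    //     VmaVirtualAllocationCreateInfo allocInfo = {};
+    //     allocInfo.size = 4096;
+    //     VmaVirtualAllocation alloc;
+    //     VkDeviceSize offset;
+    //     vmaVirtualAllocate(block, &allocInfo, &alloc, &offset);
+    //
+    //     // ... use [offset, offset + 4096) ...
+    //
+    //     vmaVirtualFree(block, alloc);   // every allocation must be freed
+    //     vmaDestroyVirtualBlock(block);  // or the assert below will fire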
+ VMA_ASSERT_LEAK(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); + + vma_delete(GetAllocationCallbacks(), m_Metadata); +} + +const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const +{ + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; +} + +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +{ + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); +} + +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) +{ + VmaAllocationRequest request = {}; + if (m_Metadata->CreateAllocationRequest( + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) + { + m_Metadata->Alloc(request, + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); + return VK_SUCCESS; + } + outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; + if (outOffset) + *outOffset = UINT64_MAX; + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +{ + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); +} + +#if VMA_STATS_STRING_ENABLED +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +{ + VmaJsonWriter json(GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats); + + if (detailedMap) + { + json.WriteString("Details"); + json.BeginObject(); + m_Metadata->PrintDetailedMap(json); + json.EndObject(); + } + + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +#endif // _VMA_VIRTUAL_BLOCK_T + + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T) +public: + const bool m_UseMutex; + const uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + bool m_UseExtMemoryPriority; + bool m_UseKhrMaintenance4; + bool m_UseKhrMaintenance5; + const VkDevice m_hDevice; + const VkInstance m_hInstance; + const bool m_AllocationCallbacksSpecified; + const VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. 
+    uint32_t m_HeapSizeLimitMask;
+
+    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+    VkPhysicalDeviceMemoryProperties m_MemProps;
+
+    // Default pools.
+    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+    VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
+
+    VmaCurrentBudgetData m_Budget;
+    VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
+
+    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
+    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
+    ~VmaAllocator_T();
+
+    const VkAllocationCallbacks* GetAllocationCallbacks() const
+    {
+        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
+    }
+    const VmaVulkanFunctions& GetVulkanFunctions() const
+    {
+        return m_VulkanFunctions;
+    }
+
+    VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+
+    VkDeviceSize GetBufferImageGranularity() const
+    {
+        return VMA_MAX(
+            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+    }
+
+    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
+    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
+    {
+        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+    }
+    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
+    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
+    {
+        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+    }
+    // Minimum alignment for all allocations in specific memory type.
+    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
+    {
+        return IsMemoryTypeNonCoherent(memTypeIndex) ?
+            VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+            (VkDeviceSize)VMA_MIN_ALIGNMENT;
+    }
+
+    bool IsIntegratedGpu() const
+    {
+        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+    }
+
+    uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
+
+    void GetBufferMemoryRequirements(
+        VkBuffer hBuffer,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+    void GetImageMemoryRequirements(
+        VkImage hImage,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+    VkResult FindMemoryTypeIndex(
+        uint32_t memoryTypeBits,
+        const VmaAllocationCreateInfo* pAllocationCreateInfo,
+        VmaBufferImageUsage bufImgUsage,
+        uint32_t* pMemoryTypeIndex) const;
+
+    // Main allocation function.
+    VkResult AllocateMemory(
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        VmaBufferImageUsage dedicatedBufferImageUsage,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    // Main deallocation function.
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + void CalculateStatistics(VmaTotalStatistics* pStats); + + void GetHeapBudgets( + VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + void GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + VkResult CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size); + VkResult CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const + { + return m_TypeExternalMemoryHandleTypes[memTypeIndex]; + } +#endif // #if VMA_EXTERNAL_MEMORY + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. 
+#if VMA_EXTERNAL_MEMORY
+    VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
+#endif // #if VMA_EXTERNAL_MEMORY
+
+    VMA_RW_MUTEX m_PoolsMutex;
+    typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
+    // Protected by m_PoolsMutex.
+    PoolList m_Pools;
+    uint32_t m_NextPoolId;
+
+    VmaVulkanFunctions m_VulkanFunctions;
+
+    // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
+    uint32_t m_GlobalMemoryTypeBits;
+
+    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+    void ImportVulkanFunctions_Static();
+#endif
+
+    void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+    void ImportVulkanFunctions_Dynamic();
+#endif
+
+    void ValidateVulkanFunctions();
+
+    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
+    VkResult AllocateMemoryOfType(
+        VmaPool pool,
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        bool dedicatedPreferred,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        VmaBufferImageUsage dedicatedBufferImageUsage,
+        const VmaAllocationCreateInfo& createInfo,
+        uint32_t memTypeIndex,
+        VmaSuballocationType suballocType,
+        VmaDedicatedAllocationList& dedicatedAllocations,
+        VmaBlockVector& blockVector,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    // Helper function only to be used inside AllocateDedicatedMemory.
+    VkResult AllocateDedicatedMemoryPage(
+        VmaPool pool,
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        uint32_t memTypeIndex,
+        const VkMemoryAllocateInfo& allocInfo,
+        bool map,
+        bool isUserDataString,
+        bool isMappingAllowed,
+        void* pUserData,
+        VmaAllocation* pAllocation);
+
+    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+    VkResult AllocateDedicatedMemory(
+        VmaPool pool,
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        VmaDedicatedAllocationList& dedicatedAllocations,
+        uint32_t memTypeIndex,
+        bool map,
+        bool isUserDataString,
+        bool isMappingAllowed,
+        bool canAliasMemory,
+        void* pUserData,
+        float priority,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        VmaBufferImageUsage dedicatedBufferImageUsage,
+        size_t allocationCount,
+        VmaAllocation* pAllocations,
+        const void* pNextChain = VMA_NULL);
+
+    void FreeDedicatedMemory(const VmaAllocation allocation);
+
+    VkResult CalcMemTypeParams(
+        VmaAllocationCreateInfo& outCreateInfo,
+        uint32_t memTypeIndex,
+        VkDeviceSize size,
+        size_t allocationCount);
+    VkResult CalcAllocationParams(
+        VmaAllocationCreateInfo& outCreateInfo,
+        bool dedicatedRequired,
+        bool dedicatedPreferred);
+
+    /*
+    Calculates and returns bit mask of memory types that can support defragmentation
+    on GPU as they support creation of required buffer for copy operations.
+    */
+    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+    uint32_t CalculateGlobalMemoryTypeBits() const;
+
+    bool GetFlushOrInvalidateRange(
+        VmaAllocation allocation,
+        VkDeviceSize offset, VkDeviceSize size,
+        VkMappedMemoryRange& outRange) const;
+
+#if VMA_MEMORY_BUDGET
+    void UpdateVulkanBudget();
+#endif // #if VMA_MEMORY_BUDGET
+};
+
+
+#ifndef _VMA_MEMORY_FUNCTIONS
+static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+{
+    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+}
+
+static void VmaFree(VmaAllocator hAllocator, void* ptr)
+{
+    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+}
+
+template<typename T>
+static T* VmaAllocate(VmaAllocator hAllocator)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static void vma_delete(VmaAllocator hAllocator, T* ptr)
+{
+    if(ptr != VMA_NULL)
+    {
+        ptr->~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+
+template<typename T>
+static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+{
+    if(ptr != VMA_NULL)
+    {
+        for(size_t i = count; i--; )
+            ptr[i].~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+#endif // _VMA_MEMORY_FUNCTIONS
+
+#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
+    : m_pMetadata(VMA_NULL),
+    m_MemoryTypeIndex(UINT32_MAX),
+    m_Id(0),
+    m_hMemory(VK_NULL_HANDLE),
+    m_MapCount(0),
+    m_pMappedData(VMA_NULL) {}
+
+VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
+{
+    VMA_ASSERT_LEAK(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
+    VMA_ASSERT_LEAK(m_hMemory == VK_NULL_HANDLE);
+}
+
+void VmaDeviceMemoryBlock::Init(
+    VmaAllocator hAllocator,
+    VmaPool hParentPool,
+    uint32_t newMemoryTypeIndex,
+    VkDeviceMemory newMemory,
+    VkDeviceSize newSize,
+    uint32_t id,
+    uint32_t algorithm,
+    VkDeviceSize bufferImageGranularity)
+{
+    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
+    m_hParentPool = hParentPool;
+    m_MemoryTypeIndex = newMemoryTypeIndex;
+    m_Id = id;
+    m_hMemory = newMemory;
+
+    switch (algorithm)
+    {
+    case 0:
+        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
+            bufferImageGranularity, false); // isVirtual
+        break;
+    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
+            bufferImageGranularity, false); // isVirtual
+        break;
+    default:
+        VMA_ASSERT(0);
+        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
+            bufferImageGranularity, false); // isVirtual
+    }
+    m_pMetadata->Init(newSize);
+}
+
+void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
+{
+    // Define macro VMA_DEBUG_LOG_FORMAT or more specialized VMA_LEAK_LOG_FORMAT
+    // to receive the list of the unfreed allocations.
+    if (!m_pMetadata->IsEmpty())
+        m_pMetadata->DebugLogAllAllocations();
+    // This is the most important assert in the entire library.
+    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
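+    // A minimal sketch of a leak that trips the assert below, assuming the usual
+    // public API usage (names are illustrative):
+    //     VkBuffer buf; VmaAllocation alloc;
+    //     vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
+    //     vmaDestroyAllocator(allocator); // vmaDestroyBuffer(allocator, buf, alloc) was never called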
+ VMA_ASSERT_LEAK(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT_LEAK(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + m_MappingHysteresis.PostAlloc(); +} + +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = VMA_NULL; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if (count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (oldTotalMapCount != 0) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + m_MappingHysteresis.PostMap(); + m_MapCount += count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if (result == VK_SUCCESS) + { + VMA_ASSERT(m_pMappedData != VMA_NULL); + m_MappingHysteresis.PostMap(); + m_MapCount = count; + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if (count == 0) + { + return; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if (m_MapCount >= count) + { + m_MapCount -= count; + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + m_MappingHysteresis.PostUnmap(); + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && 
VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} +#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS + +#ifndef _VMA_ALLOCATION_T_FUNCTIONS +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) + : m_Alignment{ 1 }, + m_Size{ 0 }, + m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, + m_MemoryTypeIndex{ 0 }, + m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, + m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, + m_MapCount{ 0 }, + m_Flags{ 0 } +{ + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; +} + +VmaAllocation_T::~VmaAllocation_T() +{ + VMA_ASSERT_LEAK(m_MapCount == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pName == VMA_NULL); +} + +void VmaAllocation_T::InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_AllocHandle = allocHandle; +} + +void VmaAllocation_T::InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + if(pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_DedicatedAllocation.m_hParentPool = hParentPool; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + m_DedicatedAllocation.m_Prev = VMA_NULL; + m_DedicatedAllocation.m_Next = VMA_NULL; +} + +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +{ + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); + + FreeName(hAllocator); + + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); +} + +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) +{ + VMA_ASSERT(allocation != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + std::swap(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); + +#if VMA_STATS_STRING_ENABLED + std::swap(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif + return m_MapCount; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VK_NULL_HANDLE; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VmaPool VmaAllocation_T::GetParentPool() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetParentPool(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hParentPool; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if (m_MapCount != 0 || 
IsPersistentMap()) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + GetOffset(); + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount < 0xFF) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount > 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount != 0 || IsPersistentMap()) + { + if (m_MapCount < 0xFF) + { + VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if (result == VK_SUCCESS) + { + m_DedicatedAllocation.m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if (m_MapCount > 0) + { + --m_MapCount; + if (m_MapCount == 0 && !IsPersistentMap()) + { + m_DedicatedAllocation.m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage.Value); // It may be uint32_t or uint64_t. 
+
+    if (m_pUserData != VMA_NULL)
+    {
+        json.WriteString("CustomData");
+        json.BeginString();
+        json.ContinueString_Pointer(m_pUserData);
+        json.EndString();
+    }
+    if (m_pName != VMA_NULL)
+    {
+        json.WriteString("Name");
+        json.WriteString(m_pName);
+    }
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
+{
+    if(m_pName)
+    {
+        VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
+        m_pName = VMA_NULL;
+    }
+}
+#endif // _VMA_ALLOCATION_T_FUNCTIONS
+
+#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
+VmaBlockVector::VmaBlockVector(
+    VmaAllocator hAllocator,
+    VmaPool hParentPool,
+    uint32_t memoryTypeIndex,
+    VkDeviceSize preferredBlockSize,
+    size_t minBlockCount,
+    size_t maxBlockCount,
+    VkDeviceSize bufferImageGranularity,
+    bool explicitBlockSize,
+    uint32_t algorithm,
+    float priority,
+    VkDeviceSize minAllocationAlignment,
+    void* pMemoryAllocateNext)
+    : m_hAllocator(hAllocator),
+    m_hParentPool(hParentPool),
+    m_MemoryTypeIndex(memoryTypeIndex),
+    m_PreferredBlockSize(preferredBlockSize),
+    m_MinBlockCount(minBlockCount),
+    m_MaxBlockCount(maxBlockCount),
+    m_BufferImageGranularity(bufferImageGranularity),
+    m_ExplicitBlockSize(explicitBlockSize),
+    m_Algorithm(algorithm),
+    m_Priority(priority),
+    m_MinAllocationAlignment(minAllocationAlignment),
+    m_pMemoryAllocateNext(pMemoryAllocateNext),
+    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+    m_NextBlockId(0) {}
+
+VmaBlockVector::~VmaBlockVector()
+{
+    for (size_t i = m_Blocks.size(); i--; )
+    {
+        m_Blocks[i]->Destroy(m_hAllocator);
+        vma_delete(m_hAllocator, m_Blocks[i]);
+    }
+}
+
+VkResult VmaBlockVector::CreateMinBlocks()
+{
+    for (size_t i = 0; i < m_MinBlockCount; ++i)
+    {
+        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+        if (res != VK_SUCCESS)
+        {
+            return res;
+        }
+    }
+    return VK_SUCCESS;
+}
+
+void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    const size_t blockCount = m_Blocks.size();
+    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VMA_HEAVY_ASSERT(pBlock->Validate());
+        pBlock->m_pMetadata->AddStatistics(inoutStats);
+    }
+}
+
+void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    const size_t blockCount = m_Blocks.size();
+    for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VMA_HEAVY_ASSERT(pBlock->Validate());
+        pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
+    }
+}
+
+bool VmaBlockVector::IsEmpty()
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+    return m_Blocks.empty();
+}
+
+bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+{
+    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+        (VMA_DEBUG_MARGIN > 0) &&
+        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+}
+
+VkResult VmaBlockVector::Allocate(
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaSuballocationType suballocType,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    size_t allocIndex;
+    VkResult res = VK_SUCCESS;
+
+    alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
+
+    if (IsCorruptionDetectionEnabled())
+    {
+        size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+        alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+    }
+
+    {
+        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+        for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+        {
+            res = AllocatePage(
+                size,
+                alignment,
+                createInfo,
+                suballocType,
+                pAllocations + allocIndex);
+            if (res != VK_SUCCESS)
+            {
+                break;
+            }
+        }
+    }
+
+    if (res != VK_SUCCESS)
+    {
+        // Free all already created allocations.
+        while (allocIndex--)
+            Free(pAllocations[allocIndex]);
+        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+    }
+
+    return res;
+}
+
+VkResult VmaBlockVector::AllocatePage(
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaSuballocationType suballocType,
+    VmaAllocation* pAllocation)
+{
+    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
+    VkDeviceSize freeMemory;
+    {
+        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+        VmaBudget heapBudget = {};
+        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
+        freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+    }
+
+    const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
+        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
+    const bool canCreateNewBlock =
+        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+        (m_Blocks.size() < m_MaxBlockCount) &&
+        (freeMemory >= size || !canFallbackToDedicated);
+    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
+    // Upper address can only be used with linear allocator and within single memory block.
+    if (isUpperAddress &&
+        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
+    {
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+    }
+
+    // Early reject: requested allocation size is larger than maximum block size for this block vector.
+    if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+    {
+        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+    }
+
+    // 1. Search existing allocations. Try to allocate.
+    if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+    {
+        // Use only last block.
+        if (!m_Blocks.empty())
+        {
+            VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
+            VMA_ASSERT(pCurrBlock);
+            VkResult res = AllocateFromBlock(
+                pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+            if (res == VK_SUCCESS)
+            {
+                VMA_DEBUG_LOG_FORMAT("  Returned from last block #%" PRIu32, pCurrBlock->GetId());
+                IncrementallySortBlocks();
+                return VK_SUCCESS;
+            }
+        }
+    }
+    else
+    {
+        if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
+        {
+            const bool isHostVisible =
+                (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
+            if(isHostVisible)
+            {
+                const bool isMappingAllowed = (createInfo.flags &
+                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
+                /*
+                For non-mappable allocations, check blocks that are not mapped first.
+                For mappable allocations, check blocks that are already mapped first.
+ This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%" PRIu32, pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if (canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if (!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if (!m_ExplicitBlockSize) + { + while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if (res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Created new block #%" PRIu32 " Size=%" PRIu64, pBlock->GetId(), newBlockSize); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaBlockVector::Free(const VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } + + // Scope for lock. + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); + } + + if (hAllocation->IsPersistentMap()) + { + pBlock->Unmap(m_hAllocator, 1); + } + + const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); + pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); + pBlock->PostFree(m_hAllocator); + VMA_HEAVY_ASSERT(pBlock->Validate()); + + VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%" PRIu32, m_MemoryTypeIndex); + + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; + // pBlock became empty after this deallocation. + if (pBlock->m_pMetadata->IsEmpty()) + { + // Already had empty block. We don't want to have two, so delete this one. + if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) + { + pBlockToDelete = pBlock; + Remove(pBlock); + } + // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. + } + // pBlock didn't become empty, but we have another empty block - find and free that one. + // (This is optional, heuristics.) + else if (hadEmptyBlockBeforeFree && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if (pLastBlock->m_pMetadata->IsEmpty()) + { + pBlockToDelete = pLastBlock; + m_Blocks.pop_back(); + } + } + + IncrementallySortBlocks(); + } + + // Destruction of a free block. Deferred until this point, outside of mutex + // lock, for performance reason. 
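+    // (Destroying the block ends in vkFreeMemory via FreeVulkanMemory(), which can be
+    // slow, so doing it while still holding m_Mutex would likely stall concurrent
+    // allocations.)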
+ if (pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%" PRIu32, pBlockToDelete->GetId()); + pBlockToDelete->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlockToDelete); + } + + m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); + m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); +} + +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ + VkDeviceSize result = 0; + for (size_t i = m_Blocks.size(); i--; ) + { + result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); + if (result >= m_PreferredBlockSize) + { + break; + } + } + return result; +} + +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if (m_Blocks[blockIndex] == pBlock) + { + VmaVectorRemove(m_Blocks, blockIndex); + return; + } + } + VMA_ASSERT(0); +} + +void VmaBlockVector::IncrementallySortBlocks() +{ + if (!m_IncrementalSort) + return; + if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Bubble sort only until first swap. + for (size_t i = 1; i < m_Blocks.size(); ++i) + { + if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { + std::swap(m_Blocks[i - 1], m_Blocks[i]); + return; + } + } + } +} + +void VmaBlockVector::SortByFreeSize() +{ + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), + [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); +} + +VkResult VmaBlockVector::AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VmaAllocationRequest currRequest = {}; + if (pBlock->m_pMetadata->CreateAllocationRequest( + size, + alignment, + isUpperAddress, + suballocType, + strategy, + &currRequest)) + { + return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + const bool isMappingAllowed = (allocFlags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + + pBlock->PostAlloc(m_hAllocator); + // Allocate from pCurrBlock. + if (mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); + pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); + (*pAllocation)->InitBlockAllocation( + pBlock, + allocRequest.allocHandle, + alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! 
+ m_MemoryTypeIndex, + suballocType, + mapped); + VMA_HEAVY_ASSERT(pBlock->Validate()); + if (isUserDataString) + (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + else + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.pNext = m_pMemoryAllocateNext; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if (m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if (m_hAllocator->m_UseExtMemoryPriority) + { + VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); + priorityInfo.priority = m_Priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); + if (exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // VMA_EXTERNAL_MEMORY + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if (res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. 
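+    // The block object pairs the raw VkDeviceMemory with its suballocation metadata;
+    // the pNext chain assembled above (device address / priority / export info) was
+    // applied to the whole VkDeviceMemory, so in effect every suballocation inherits it.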
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + pBlock->Init( + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm, + m_BufferImageGranularity); + + m_Blocks.push_back(pBlock); + if (pNewBlockIndex != VMA_NULL) + { + *pNewBlockIndex = m_Blocks.size() - 1; + } + + return VK_SUCCESS; +} + +bool VmaBlockVector::HasEmptyBlock() +{ + for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if (pBlock->m_pMetadata->IsEmpty()) + { + return true; + } + } + return false; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + + json.BeginObject(); + for (size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + json.BeginObject(); + json.WriteString("MapRefCount"); + json.WriteNumber(m_Blocks[i]->GetMapRefCount()); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + json.EndObject(); + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED + +VkResult VmaBlockVector::CheckCorruption() +{ + if (!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +#endif // _VMA_BLOCK_VECTOR_FUNCTIONS + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS +VmaDefragmentationContext_T::VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info) + : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
+        UINT32_MAX : info.maxAllocationsPerPass),
+    m_BreakCallback(info.pfnBreakCallback),
+    m_BreakCallbackUserData(info.pBreakCallbackUserData),
+    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
+    m_Moves(m_MoveAllocator)
+{
+    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
+
+    if (info.pool != VMA_NULL)
+    {
+        m_BlockVectorCount = 1;
+        m_PoolBlockVector = &info.pool->m_BlockVector;
+        m_pBlockVectors = &m_PoolBlockVector;
+        m_PoolBlockVector->SetIncrementalSort(false);
+        m_PoolBlockVector->SortByFreeSize();
+    }
+    else
+    {
+        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
+        m_PoolBlockVector = VMA_NULL;
+        m_pBlockVectors = hAllocator->m_pBlockVectors;
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+            {
+                vector->SetIncrementalSort(false);
+                vector->SortByFreeSize();
+            }
+        }
+    }
+
+    switch (m_Algorithm)
+    {
+    case 0: // Default algorithm
+        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
+        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+        break;
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+        break;
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        if (hAllocator->GetBufferImageGranularity() > 1)
+        {
+            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
+        }
+        break;
+    }
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        m_PoolBlockVector->SetIncrementalSort(true);
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+                vector->SetIncrementalSort(true);
+        }
+    }
+
+    if (m_AlgorithmState)
+    {
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
+
+        if (m_PoolBlockVector->GetBlockCount() > 1)
+            ComputeDefragmentation(*m_PoolBlockVector, 0);
+        else if (m_PoolBlockVector->GetBlockCount() == 1)
+            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            if (m_pBlockVectors[i] != VMA_NULL)
+            {
+                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
+
+                if (m_pBlockVectors[i]->GetBlockCount() > 1)
+                {
+                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
+                        break;
+                }
+                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
+                {
+                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
+                        break;
+                }
+            }
+        }
+    }
+
+    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
+    if (moveInfo.moveCount > 0)
+    {
+        moveInfo.pMoves = m_Moves.data();
+        return VK_INCOMPLETE;
+    }
+
+    moveInfo.pMoves = VMA_NULL;
+    return VK_SUCCESS;
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
+{
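+    // Each move returned from DefragmentPassBegin() is reconciled below:
+    // COPY commits the swap into the destination block, IGNORE rolls the move back,
+    // and DESTROY frees the source allocation outright.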
+    VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
+
+    VkResult result = VK_SUCCESS;
+    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
+
+    VmaAllocator allocator = VMA_NULL;
+    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
+    {
+        VmaDefragmentationMove& move = moveInfo.pMoves[i];
+        size_t prevCount = 0, currentCount = 0;
+        VkDeviceSize freedBlockSize = 0;
+
+        uint32_t vectorIndex;
+        VmaBlockVector* vector;
+        if (m_PoolBlockVector != VMA_NULL)
+        {
+            vectorIndex = 0;
+            vector = m_PoolBlockVector;
+        }
+        else
+        {
+            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
+            vector = m_pBlockVectors[vectorIndex];
+            VMA_ASSERT(vector != VMA_NULL);
+        }
+
+        switch (move.operation)
+        {
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
+        {
+            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
+            if (mapCount > 0)
+            {
+                allocator = vector->m_hAllocator;
+                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
+                bool notPresent = true;
+                for (FragmentedBlock& block : mappedBlocks)
+                {
+                    if (block.block == newMapBlock)
+                    {
+                        notPresent = false;
+                        block.data += mapCount;
+                        break;
+                    }
+                }
+                if (notPresent)
+                    mappedBlocks.push_back({ mapCount, newMapBlock });
+            }
+
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            vector->Free(move.dstTmpAllocation);
+
+            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
+            bool notPresent = true;
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                if (block.block == newBlock)
+                {
+                    notPresent = false;
+                    break;
+                }
+            }
+            if (notPresent)
+                immovableBlocks.push_back({ vectorIndex, newBlock });
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.srcAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+            freedBlockSize *= prevCount - currentCount;
+
+            VkDeviceSize dstBlockSize;
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        default:
+            VMA_ASSERT(0);
+        }
+
+        if (prevCount > currentCount)
+        {
+            size_t freedBlocks = prevCount - currentCount;
+            m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
+            m_PassStats.bytesFreed += freedBlockSize;
+        }
+
+        if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
+            m_AlgorithmState != VMA_NULL)
+        {
+            // Avoid unnecessary tries to allocate when new free block is available
+            StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
+            if (state.firstFreeBlock != SIZE_MAX)
+            {
+                const size_t diff = prevCount - currentCount;
+                if (state.firstFreeBlock >= diff)
+                {
+                    state.firstFreeBlock -= diff;
+                    if (state.firstFreeBlock != 0)
+                        state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
+                }
+                else
+                    state.firstFreeBlock = 0;
+            }
+        }
+    }
+    moveInfo.moveCount = 0;
+    moveInfo.pMoves = VMA_NULL;
+    m_Moves.clear();
+
+    // Update stats
+    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
+    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
+    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
+    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
+    m_PassStats = { 0 };
+
+    // Move blocks with immovable allocations according to algorithm
+    if (immovableBlocks.size() > 0)
+    {
+        do
+        {
+            if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
+            {
+                if (m_AlgorithmState != VMA_NULL)
+                {
+                    bool swapped = false;
+                    // Move to the start of free blocks range
+                    for (const FragmentedBlock& block : immovableBlocks)
+                    {
+                        StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
+                        if (state.operation != StateExtensive::Operation::Cleanup)
+                        {
+                            VmaBlockVector* vector = m_pBlockVectors[block.data];
+                            VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                            for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
+                            {
+                                if (vector->GetBlock(i) == block.block)
+                                {
+                                    std::swap(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
+                                    if (state.firstFreeBlock != SIZE_MAX)
+                                    {
+                                        if (i + 1 < state.firstFreeBlock)
+                                        {
+                                            if (state.firstFreeBlock > 1)
+                                                std::swap(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
+                                            else
+                                                --state.firstFreeBlock;
+                                        }
+                                    }
+                                    swapped = true;
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                    if (swapped)
+                        result = VK_INCOMPLETE;
+                    break;
+                }
+            }
+
+            // Move to the beginning
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                VmaBlockVector* vector = m_pBlockVectors[block.data];
+                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
+                {
+                    if (vector->GetBlock(i) == block.block)
+                    {
+                        std::swap(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
+                        break;
+                    }
+                }
+            }
+        } while (false);
+    }
+
+    // Bulk-map destination blocks
+    for (const FragmentedBlock& block : mappedBlocks)
+    {
+        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
+        VMA_ASSERT(res == VK_SUCCESS);
+    }
+    return result;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
+{
+    switch (m_Algorithm)
+    {
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
+        return ComputeDefragmentation_Fast(vector);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+        return ComputeDefragmentation_Balanced(vector, index, true);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
+        return ComputeDefragmentation_Full(vector);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        return ComputeDefragmentation_Extensive(vector, index);
default: + VMA_ASSERT(0); + return ComputeDefragmentation_Balanced(vector, index, true); + } +} + +VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( + VmaAllocHandle handle, VmaBlockMetadata* metadata) +{ + MoveAllocationData moveData; + moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); + moveData.size = moveData.move.srcAllocation->GetSize(); + moveData.alignment = moveData.move.srcAllocation->GetAlignment(); + moveData.type = moveData.move.srcAllocation->GetSuballocationType(); + moveData.flags = 0; + + if (moveData.move.srcAllocation->IsPersistentMap()) + moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; + if (moveData.move.srcAllocation->IsMappingAllowed()) + moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + + return moveData; +} + +VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) +{ + // Check custom criteria if exists + if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData)) + return CounterStatus::End; + + // Ignore allocation if will exceed max size for copy + if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) + { + if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) + return CounterStatus::Ignore; + else + return CounterStatus::End; + } + else + m_IgnoredAllocs = 0; + return CounterStatus::Pass; +} + +bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) +{ + m_PassStats.bytesMoved += bytes; + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { + VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || + m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + return true; + } + return false; +} + +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) +{ + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) +{ + for (; start < end; ++start) + { + VmaDeviceMemoryBlock* dstBlock = 
vector.GetBlock(start); + if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) + { + if (vector.AllocateFromBlock(dstBlock, + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, + &data.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(data.move); + if (IncrementCounters(data.size)) + return true; + break; + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) +{ + // Move only between blocks + + // Go through allocations in last blocks and try to fit them inside first ones + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0), + // but only if there are noticeable gaps between them (some heuristic, ex. 
average size of allocation in block) + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index]; + if (update && vectorState.avgAllocSize == UINT64_MAX) + UpdateVectorStatistics(vector, vectorState); + + const size_t startMoveCount = m_Moves.size(); + VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + VkDeviceSize prevFreeRegionSize = 0; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + // Check if realloc will make sense + if (prevFreeRegionSize >= minimalFreeRegion || + nextFreeRegionSize >= minimalFreeRegion || + moveData.size <= vectorState.avgFreeSize || + moveData.size <= vectorState.avgAllocSize) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + prevFreeRegionSize = nextFreeRegionSize; + } + } + + // No moves performed, update statistics to current vector state + if (startMoveCount == m_Moves.size() && !update) + { + vectorState.avgAllocSize = UINT64_MAX; + return ComputeDefragmentation_Balanced(vector, index, false); + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0) + + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch
(CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) +{ + // First free single block, then populate it to the brim, then free another block, and so on + + // Fallback to previous algorithm since without granularity conflicts it can achieve max packing + if (vector.m_BufferImageGranularity == 1) + return ComputeDefragmentation_Full(vector); + + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index]; + + bool texturePresent = false, bufferPresent = false, otherPresent = false; + switch (vectorState.operation) + { + case StateExtensive::Operation::Done: // Vector defragmented + return false; + case StateExtensive::Operation::FindFreeBlockBuffer: + case StateExtensive::Operation::FindFreeBlockTexture: + case StateExtensive::Operation::FindFreeBlockAll: + { + // No more blocks to free, just perform fast realloc and move to cleanup + if (vectorState.firstFreeBlock == 0) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + return ComputeDefragmentation_Fast(vector); + } + + // No free blocks, have to clear last one + size_t last = (vectorState.firstFreeBlock == SIZE_MAX ?
vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; + VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; + + const size_t prevMoveCount = m_Moves.size(); + for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, freeMetadata); + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, last, moveData, vector)) + { + // Full clear performed already + if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) + vectorState.firstFreeBlock = last; + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // Cannot perform full clear, have to move data in other blocks around + if (last != 0) + { + for (size_t i = last - 1; i; --i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // No possible reallocs within blocks, try to move them around fast + return ComputeDefragmentation_Fast(vector); + } + } + else + { + switch (vectorState.operation) + { + case StateExtensive::Operation::FindFreeBlockBuffer: + vectorState.operation = StateExtensive::Operation::MoveBuffers; + break; + case StateExtensive::Operation::FindFreeBlockTexture: + vectorState.operation = StateExtensive::Operation::MoveTextures; + break; + case StateExtensive::Operation::FindFreeBlockAll: + vectorState.operation = StateExtensive::Operation::MoveAll; + break; + default: + VMA_ASSERT(0); + vectorState.operation = StateExtensive::Operation::MoveTextures; + } + vectorState.firstFreeBlock = last; + // Nothing done, block found without reallocations, can perform another reallocs in same pass + return ComputeDefragmentation_Extensive(vector, index); + } + break; + } + case StateExtensive::Operation::MoveTextures: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (texturePresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!bufferPresent && !otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more textures to move, check buffers + vectorState.operation = StateExtensive::Operation::MoveBuffers; + bufferPresent = false; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveBuffers: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (bufferPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more buffers to move, check all others + vectorState.operation = StateExtensive::Operation::MoveAll; + otherPresent = false; + } + else + break; + VMA_FALLTHROUGH; // Fallthrough + } + case StateExtensive::Operation::MoveAll: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, 
vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (otherPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + // Everything moved + vectorState.operation = StateExtensive::Operation::Cleanup; + } + break; + } + case StateExtensive::Operation::Cleanup: + // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). + break; + } + + if (vectorState.operation == StateExtensive::Operation::Cleanup) + { + // All other work done, pack data in blocks even tighter if possible + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + + if (prevMoveCount == m_Moves.size()) + vectorState.operation = StateExtensive::Operation::Done; + } + return false; +} + +void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +{ + size_t allocCount = 0; + size_t freeCount = 0; + state.avgFreeSize = 0; + state.avgAllocSize = 0; + + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + allocCount += metadata->GetAllocationCount(); + freeCount += metadata->GetFreeRegionsCount(); + state.avgFreeSize += metadata->GetSumFreeSize(); + state.avgAllocSize += metadata->GetSize(); + } + + state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; + state.avgFreeSize /= freeCount; +} + +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent) +{ + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = firstFreeBlock ; i;) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(--i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + case CounterStatus::Pass: + break; + default: + VMA_ASSERT(0); + } + + // Move only single type of resources at once + if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) + { + // Try to fit allocation into free blocks + if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) + return false; + } + + if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) + texturePresent = true; + else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) + bufferPresent = true; + else + otherPresent = true; + } + } + return prevMoveCount == m_Moves.size(); +} +#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS + +#ifndef _VMA_POOL_T_FUNCTIONS +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) + : m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? 
createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), + m_Id(0), + m_Name(VMA_NULL) {} + +VmaPool_T::~VmaPool_T() +{ + VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); + + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); +} + +void VmaPool_T::SetName(const char* pName) +{ + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); + + if (pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; + } +} +#endif // _VMA_POOL_T_FUNCTIONS + +#ifndef _VMA_ALLOCATOR_T_FUNCTIONS +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), + m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), + m_UseKhrMaintenance4((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT) != 0), + m_UseKhrMaintenance5((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? + *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_DeviceMemoryCount(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) +{ + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_UseKhrDedicatedAllocation = false; + m_UseKhrBindMemory2 = false; + } + + if(VMA_DEBUG_DETECT_CORRUPTION) + { + // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1002000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if !(VMA_MEMORY_PRIORITY) + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE4) + if(m_UseKhrMaintenance4) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if !(VMA_KHR_MAINTENANCE5) + if(m_UseKhrMaintenance5) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + +#if VMA_EXTERNAL_MEMORY + memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = 
pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? + pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + { + memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + } +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + // Create only supported types + if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) + { + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + false, // explicitBlockSize + 0, // algorithm + 0.5f, // priority (0.5 is the default per Vulkan spec) + GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment + VMA_NULL); // pMemoryAllocateNext + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // because minBlockCount is 0.
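The heap-limit loop and the per-type block vectors above are driven entirely by what the caller passes in VmaAllocatorCreateInfo. As an illustrative sketch, not part of the patch (physicalDevice, device and instance are assumed to be valid handles), an allocator created with heap 0 capped at 1 GiB looks like:

    VkPhysicalDeviceMemoryProperties memProps;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);

    // VK_WHOLE_SIZE means "no limit" for a heap; any smaller value is clamped above.
    std::vector<VkDeviceSize> heapLimits(memProps.memoryHeapCount, VK_WHOLE_SIZE);
    heapLimits[0] = 1ull << 30; // cap heap 0 at 1 GiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;
    allocatorInfo.pHeapSizeLimit = heapLimits.data();

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);

The cap only shrinks what the allocator believes the heap holds; it does not reserve or free any device memory by itself.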
+ } + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ + VMA_ASSERT(m_Pools.IsEmpty()); + + for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + { + vma_delete(this, m_pBlockVectors[memTypeIndex]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; + m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) 
+ { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif +} + +#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif + +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. 
" + "Other members can be null."); + +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, 
"vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); + } + else if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif +#if VMA_KHR_MAINTENANCE4 + if(m_UseKhrMaintenance4) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirementsKHR, "vkGetDeviceBufferMemoryRequirementsKHR"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirementsKHR, "vkGetDeviceImageMemoryRequirementsKHR"); + } +#endif + +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + 
if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif + + // Not validating these due to suspected driver bugs with these function + // pointers being null despite the correct extension or Vulkan version being enabled. + // See issue #397. Their usage in VMA is optional anyway. + // + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); + // VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); +} + +VkResult VmaAllocator_T::AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%" PRIu32 ", AllocationCount=%zu, Size=%" PRIu64, memTypeIndex, allocationCount, size); + + VmaAllocationCreateInfo finalCreateInfo = createInfo; + VkResult res = CalcMemTypeParams( + finalCreateInfo, + memTypeIndex, + size, + allocationCount); + if(res != VK_SUCCESS) + return res; + + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + return AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + } + else + { + const bool canAllocateDedicated = + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); + + if(canAllocateDedicated) + { + // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size. + if(size > blockVector.GetPreferredBlockSize() / 2) + { + dedicatedPreferred = true; + } + // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, + // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above + // 3/4 of the maximum allocation count.
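The two thresholds in the comments above are easiest to see with concrete numbers: with a 256 MiB preferred block size, a 130 MiB request flips dedicatedPreferred to true, but with maxMemoryAllocationCount = 4096 and 3100 live VkDeviceMemory objects (3100 > 4096 * 3 / 4 = 3072) the preference is dropped again. A minimal standalone sketch of the same policy (names hypothetical, mirroring the member code that follows):

    #include <cstdint>

    bool PreferDedicated(uint64_t size, uint64_t preferredBlockSize,
                         uint32_t liveDeviceMemoryCount, uint32_t maxAllocationCount,
                         bool dedicatedPreferred)
    {
        // Large requests (more than half a block) prefer their own VkDeviceMemory...
        if(size > preferredBlockSize / 2)
            dedicatedPreferred = true;
        // ...unless 3/4 of maxMemoryAllocationCount is already in use; the first
        // comparison also guards the multiplication below against overflow.
        if(maxAllocationCount < UINT32_MAX / 4 &&
           liveDeviceMemoryCount > maxAllocationCount * 3 / 4)
            dedicatedPreferred = false;
        return dedicatedPreferred;
    }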
+ if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 && + m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) + { + dedicatedPreferred = false; + } + + if(dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + } + + res = blockVector.Allocate( + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Try dedicated memory. + if(canAllocateDedicated && !dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + // Everything failed: Return error code. 
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + allocInfo.pNext = pNextChain; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(!canAliasMemory) + { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == VmaBufferImageUsage::UNKNOWN || + dedicatedBufferImageUsage.Contains(VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(priority >= 0.f && priority <= 1.f); + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. 
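Each of the extension structs above, and the export-memory struct that follows, is spliced onto allocInfo.pNext with VmaPnextChainPushFront, which (assuming the usual definition earlier in this file) is nothing more than a push-front on the singly linked pNext chain:

    // The new struct becomes the head of the chain; the old head becomes its pNext.
    template<typename MainT, typename NewT>
    void PnextChainPushFront(MainT* mainStruct, NewT* newStruct)
    {
        newStruct->pNext = mainStruct->pNext;
        mainStruct->pNext = newStruct;
    }

Push-front keeps every insertion O(1); the resulting reverse order is harmless because Vulkan does not assign meaning to the ordering of a pNext chain.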
+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); + if(exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // #if VMA_EXTERNAL_MEMORY + + size_t allocIndex; + VkResult res = VK_SUCCESS; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + pool, + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + isMappingAllowed, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + dedicatedAllocations.Register(pAllocations[allocIndex]); + } + VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%" PRIu32, allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. + while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); + (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); + if (isUserDataString) + (*pAllocation)->SetName(this, (const char*)pUserData); + else + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR 
memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VmaBufferImageUsage bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. 
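With VMA_COUNT_BITS_SET being a population count, the cost computed above is just the number of preferred bits a type lacks plus the number of not-preferred bits it has. A worked sketch (C++20 for std::popcount; flag names abbreviated and illustrative):

    #include <bit>
    #include <cstdint>

    uint32_t MemTypeCost(uint32_t currFlags, uint32_t preferredFlags, uint32_t notPreferredFlags)
    {
        return std::popcount(preferredFlags & ~currFlags)      // preferred bits missing from this type
             + std::popcount(currFlags & notPreferredFlags);   // unwanted bits present in this type
    }

For example, with preferredFlags = DEVICE_LOCAL | HOST_VISIBLE and notPreferredFlags = HOST_CACHED, a DEVICE_LOCAL-only type costs 1 while a HOST_VISIBLE | HOST_CACHED type costs 2, so the first wins; a type carrying both preferred bits and no cached bit costs 0 and ends the search early.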
+ if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VkResult VmaAllocator_T::CalcMemTypeParams( + VmaAllocationCreateInfo& inoutCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount) +{ + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetHeapBudgets(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CalcAllocationParams( + VmaAllocationCreateInfo& inoutCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred) +{ + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. 
+ if(dedicatedRequired || + inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if(inoutCreateInfo.pool != VK_NULL_HANDLE) + { + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. + if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VmaBufferImageUsage dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VmaAllocationCreateInfo createInfoFinal = createInfo; + VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); + if(res != VK_SUCCESS) + return res; + + if(createInfoFinal.pool != VK_NULL_HANDLE) + { + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( + createInfoFinal.pool, + vkMemReq.size, + vkMemReq.alignment, + prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + blockVector.GetMemoryTypeIndex(), + suballocType, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, + allocationCount, + pAllocations); + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. 
res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); + + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); + } + + allocation->FreeName(this); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + } +} + +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +{ + // Initialize. + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + } + + // Process dedicated allocations. 
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); + } + + // Sum from memory heaps to total. + for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); +} + +void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudgets->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudgets->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudgets->usage = outBudgets->statistics.blockBytes; + outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
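+ // (Illustrative note, not part of upstream VMA: without VK_EXT_memory_budget
+ // there is no OS/driver feedback, so e.g. an 8 GiB heap is simply reported as
+ // budget = 8 GiB * 8 / 10 = 6.4 GiB, and usage counts only this allocator's
+ // own blocks, not other processes or other allocators in this process.)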
+ } + } +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); +} + +void VmaAllocator_T::GetAllocationInfo2(VmaAllocation hAllocation, VmaAllocationInfo2* pAllocationInfo) +{ + GetAllocationInfo(hAllocation, &pAllocationInfo->allocationInfo); + + switch (hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + pAllocationInfo->blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize(); + pAllocationInfo->dedicatedMemory = VK_FALSE; + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + pAllocationInfo->blockSize = pAllocationInfo->allocationInfo.size; + pAllocationInfo->dedicatedMemory = VK_TRUE; + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%" PRIu32 ", flags=%" PRIu32, pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. + if(pCreateInfo->pMemoryAllocateNext) + { + VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + } + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + if(newCreateInfo.minAllocationAlignment > 0) + { + VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + m_Pools.PushBack(*pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. 
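+ // (Illustrative note, not part of upstream VMA: per VMA's documented
+ // contract the caller must have freed every allocation made from this pool
+ // beforehand; a minimal teardown sketch, with `buffer`/`allocation` standing
+ // in for the caller's objects, is:
+ //   vmaDestroyBuffer(allocator, buffer, allocation); // free all pool allocations first
+ //   vmaDestroyPool(allocator, pool);
+ // )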
+ {
+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+ m_Pools.Remove(pool);
+ }
+
+ vma_delete(this, pool);
+}
+
+void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
+{
+ VmaClearStatistics(*pPoolStats);
+ pool->m_BlockVector.AddStatistics(*pPoolStats);
+ pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
+}
+
+void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
+{
+ VmaClearDetailedStatistics(*pPoolStats);
+ pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
+ pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
+}
+
+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+{
+ m_CurrentFrameIndex.store(frameIndex);
+
+#if VMA_MEMORY_BUDGET
+ if(m_UseExtMemoryBudget)
+ {
+ UpdateVulkanBudget();
+ }
+#endif // #if VMA_MEMORY_BUDGET
+}
+
+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+{
+ return hPool->m_BlockVector.CheckCorruption();
+}
+
+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+{
+ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
+ // Process default pools.
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+ if(pBlockVector != VMA_NULL)
+ {
+ VkResult localRes = pBlockVector->CheckCorruption();
+ switch(localRes)
+ {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
+ }
+ }
+
+ // Process custom pools.
+ {
+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+ {
+ if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+ {
+ VkResult localRes = pool->m_BlockVector.CheckCorruption();
+ switch(localRes)
+ {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
+ }
+ }
+ }
+
+ return finalRes;
+}
+
+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+{
+ AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
+ const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
+#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+ if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
+ {
+ return VK_ERROR_TOO_MANY_OBJECTS;
+ }
+#endif
+
+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
+ // HeapSizeLimit is in effect for this heap.
+ if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+ {
+ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+ VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+ for(;;)
+ {
+ const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+ if(blockBytesAfterAllocation > heapSize)
+ {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+ {
+ break;
+ }
+ }
+ }
+ else
+ {
+ m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+ }
+ ++m_Budget.m_BlockCount[heapIndex];
+
+ // VULKAN CALL vkAllocateMemory.
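+ // (Illustrative note, not part of upstream VMA: the block counters above are
+ // bumped optimistically and rolled back on failure below. The
+ // compare_exchange_strong loop handles races under a heap size limit: with a
+ // 100 MiB limit and two threads each adding 64 MiB, one CAS wins; the other
+ // reloads blockBytes, re-checks 64 + 64 > 100 and returns
+ // VK_ERROR_OUT_OF_DEVICE_MEMORY.)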
+ VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + + deviceMemoryCountIncrement.Commit(); + } + else + { + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. + (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= size; + + --m_DeviceMemoryCount; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + 
hAllocation->BlockAllocMap();
+ }
+ return res;
+ }
+ VMA_FALLTHROUGH; // Fallthrough
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ return hAllocation->DedicatedAllocMap(this, ppData);
+ default:
+ VMA_ASSERT(0);
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
+}
+
+void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+{
+ switch(hAllocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+ hAllocation->BlockAllocUnmap();
+ pBlock->Unmap(this, 1);
+ }
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ hAllocation->DedicatedAllocUnmap(this);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+
+VkResult VmaAllocator_T::BindBufferMemory(
+ VmaAllocation hAllocation,
+ VkDeviceSize allocationLocalOffset,
+ VkBuffer hBuffer,
+ const void* pNext)
+{
+ VkResult res = VK_ERROR_UNKNOWN_COPY;
+ switch(hAllocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
+ res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+VkResult VmaAllocator_T::BindImageMemory(
+ VmaAllocation hAllocation,
+ VkDeviceSize allocationLocalOffset,
+ VkImage hImage,
+ const void* pNext)
+{
+ VkResult res = VK_ERROR_UNKNOWN_COPY;
+ switch(hAllocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
+ res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+ VmaAllocation hAllocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VMA_CACHE_OPERATION op)
+{
+ VkResult res = VK_SUCCESS;
+
+ VkMappedMemoryRange memRange = {};
+ if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+ {
+ switch(op)
+ {
+ case VMA_CACHE_FLUSH:
+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ case VMA_CACHE_INVALIDATE:
+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+ }
+ // else: Just ignore this call.
+ return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+ VMA_CACHE_OPERATION op)
+{
+ typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+ typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+ RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+ for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+ {
+ const VmaAllocation alloc = allocations[allocIndex];
+ const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+ const VkDeviceSize size = sizes != VMA_NULL ?
sizes[allocIndex] : VK_WHOLE_SIZE; + VkMappedMemoryRange newRange; + if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) + { + ranges.push_back(newRange); + } + } + + VkResult res = VK_SUCCESS; + if(!ranges.empty()) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + case VMA_CACHE_INVALIDATE: + res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + default: + VMA_ASSERT(0); + } + } + // else: Just ignore this call. + return res; +} + +VkResult VmaAllocator_T::CopyMemoryToAllocation( + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + void* dstMappedData = VMA_NULL; + VkResult res = Map(dstAllocation, &dstMappedData); + if(res == VK_SUCCESS) + { + memcpy((char*)dstMappedData + dstAllocationLocalOffset, pSrcHostPointer, (size_t)size); + Unmap(dstAllocation); + res = FlushOrInvalidateAllocation(dstAllocation, dstAllocationLocalOffset, size, VMA_CACHE_FLUSH); + } + return res; +} + +VkResult VmaAllocator_T::CopyAllocationToMemory( + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + void* srcMappedData = VMA_NULL; + VkResult res = Map(srcAllocation, &srcMappedData); + if(res == VK_SUCCESS) + { + res = FlushOrInvalidateAllocation(srcAllocation, srcAllocationLocalOffset, size, VMA_CACHE_INVALIDATE); + if(res == VK_SUCCESS) + { + memcpy(pDstHostPointer, (const char*)srcMappedData + srcAllocationLocalOffset, (size_t)size); + Unmap(srcAllocation); + } + } + return res; +} + +void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) +{ + VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + VmaPool parentPool = allocation->GetParentPool(); + if(parentPool == VK_NULL_HANDLE) + { + // Default pool + m_DedicatedAllocations[memTypeIndex].Unregister(allocation); + } + else + { + // Custom pool + parentPool->m_DedicatedAllocations.Unregister(allocation); + } + + VkDeviceMemory hMemory = allocation->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + m_AllocationObjectAllocator.Free(allocation); + + VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%" PRIu32, memTypeIndex); +} + +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ + VkBufferCreateInfo dummyBufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); + + uint32_t memoryTypeBits = 0; + + // Create buffer. + VkBuffer buf = VK_NULL_HANDLE; + VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { + // Query for supported memory types. + VkMemoryRequirements memReq; + (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); + memoryTypeBits = memReq.memoryTypeBits; + + // Destroy buffer. 
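+ // (Illustrative note, not part of upstream VMA: the buffer created above is a
+ // throwaway probe used only to learn memReq.memoryTypeBits; no memory is ever
+ // bound to it, so destroying it immediately is safe.)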
+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. + VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. 
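+ // (Illustrative worked example, not part of upstream VMA; all numbers
+ // invented: nonCoherentAtomSize = 64, the allocation starts at offset 192
+ // inside its block, and the caller flushes offset = 70, size = 40.
+ // Step 1 above: outRange.offset = AlignDown(70, 64) = 64,
+ //               outRange.size = AlignUp(40 + (70 - 64), 64) = 64.
+ // Step 2 below: outRange.offset += 192 -> 256, then outRange.size is clamped
+ // so the range never extends past the end of the block.)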
+ const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} +#endif // VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + hAllocation->IsMappingAllowed() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + json.WriteString("DefaultPools"); + json.BeginObject(); + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + 
json.BeginObject(); + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); + + json.WriteString("Blocks"); + pBlockVector->PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + dedicatedAllocList.BuildStatsString(json); + } + json.EndObject(); + } + } + } + json.EndObject(); + + json.WriteString("CustomPools"); + json.BeginObject(); + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + if (!m_Pools.IsEmpty()) + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } + + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString((uint64_t)index++); + if (pool->GetName()) + { + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); + } + json.EndString(); + + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } + } + + if (!displayType) + json.EndArray(); + } + } + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_ALLOCATOR_T_FUNCTIONS + + +#ifndef _VMA_PUBLIC_INTERFACE +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + VkResult result = (*pAllocator)->Init(pCreateInfo); + if(result < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); + *pAllocator = VK_NULL_HANDLE; + } + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. 
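+ // (Illustrative note, not part of upstream VMA: the copy above matters
+ // because the VkAllocationCallbacks instance lives inside *allocator, which
+ // vma_delete() below is about to destroy; freeing through the original
+ // pointer would read already-freed memory.)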
+ vma_delete(&allocationCallbacks, allocator);
+ }
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
+{
+ VMA_ASSERT(allocator && pAllocatorInfo);
+ pAllocatorInfo->instance = allocator->m_hInstance;
+ pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
+ pAllocatorInfo->device = allocator->m_hDevice;
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
+{
+ VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+ *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
+{
+ VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+ *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+ VmaAllocator allocator,
+ uint32_t memoryTypeIndex,
+ VkMemoryPropertyFlags* pFlags)
+{
+ VMA_ASSERT(allocator && pFlags);
+ VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+ *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+ VmaAllocator allocator,
+ uint32_t frameIndex)
+{
+ VMA_ASSERT(allocator);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocator->SetCurrentFrameIndex(frameIndex);
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
+ VmaAllocator allocator,
+ VmaTotalStatistics* pStats)
+{
+ VMA_ASSERT(allocator && pStats);
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+ allocator->CalculateStatistics(pStats);
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
+ VmaAllocator allocator,
+ VmaBudget* pBudgets)
+{
+ VMA_ASSERT(allocator && pBudgets);
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+ allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+ VmaAllocator allocator,
+ char** ppStatsString,
+ VkBool32 detailedMap)
+{
+ VMA_ASSERT(allocator && ppStatsString);
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VmaStringBuilder sb(allocator->GetAllocationCallbacks());
+ {
+ VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+ allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
+
+ VmaTotalStatistics stats;
+ allocator->CalculateStatistics(&stats);
+
+ VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+ json.BeginObject();
+ {
+ json.WriteString("General");
+ json.BeginObject();
+ {
+ const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
+ const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
+
+ json.WriteString("API");
+ json.WriteString("Vulkan");
+
+ json.WriteString("apiVersion");
+ json.BeginString();
+ json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
+ json.ContinueString(".");
+ json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
+ json.ContinueString(".");
+ json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
+ json.EndString();
+
+ json.WriteString("GPU");
+ json.WriteString(deviceProperties.deviceName);
+ json.WriteString("deviceType");
+ json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
+
+ json.WriteString("maxMemoryAllocationCount");
+ json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
+
json.WriteString("bufferImageGranularity"); + json.WriteNumber(deviceProperties.limits.bufferImageGranularity); + json.WriteString("nonCoherentAtomSize"); + json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); + + json.WriteString("memoryHeapCount"); + json.WriteNumber(memoryProperties.memoryHeapCount); + json.WriteString("memoryTypeCount"); + json.WriteNumber(memoryProperties.memoryTypeCount); + } + json.EndObject(); + } + { + json.WriteString("Total"); + VmaPrintDetailedStatistics(json, stats.total); + } + { + json.WriteString("MemoryInfo"); + json.BeginObject(); + { + for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + { + const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + json.WriteString("Flags"); + json.BeginArray(true); + { + if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + #if VMA_VULKAN_VERSION >= 1001000 + if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) + json.WriteString("MULTI_INSTANCE"); + #endif + + VkMemoryHeapFlags flags = heapInfo.flags & + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + #endif + ); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Size"); + json.WriteNumber(heapInfo.size); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BudgetBytes"); + json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("UsageBytes"); + json.WriteNumber(budgets[heapIndex].usage); + } + json.EndObject(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); + + json.WriteString("MemoryPools"); + json.BeginObject(); + { + for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("Flags"); + json.BeginArray(true); + { + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) + json.WriteString("HOST_VISIBLE"); + if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + 
json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } + json.EndObject(); + } + } + json.EndObject(); + } + + if (detailedMap == VK_TRUE) + allocator->PrintDetailedMap(json); + + json.EndObject(); + } + + *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); + } +} + +#endif // VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, VmaBufferImageUsage::UNKNOWN, pMemoryTypeIndex); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceBufferMemoryRequirements) + { + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirementsKHR devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + } + else +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != 
VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirementsKHR devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + } + else +#endif // VMA_KHR_MAINTENANCE4 || VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, + VmaBufferImageUsage(*pImageCreateInfo), pMemoryTypeIndex); + + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CreatePool(pCreateInfo, pPool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaDetailedStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->CalculatePoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST 
vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + 
VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + VmaBufferImageUsage::UNKNOWN, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo2( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo2* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo2(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) +{ + VMA_ASSERT(allocator && allocation && pFlags); + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->Map(allocation, ppData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + 
allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyMemoryToAllocation( + VmaAllocator allocator, + const void* pSrcHostPointer, + VmaAllocation dstAllocation, + VkDeviceSize dstAllocationLocalOffset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && pSrcHostPointer && dstAllocation); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyMemoryToAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyMemoryToAllocation(pSrcHostPointer, dstAllocation, dstAllocationLocalOffset, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCopyAllocationToMemory( + VmaAllocator allocator, + VmaAllocation srcAllocation, + VkDeviceSize srcAllocationLocalOffset, + void* pDstHostPointer, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && srcAllocation && pDstHostPointer); + + if(size == 0) + { + return VK_SUCCESS; + } + + VMA_DEBUG_LOG("vmaCopyAllocationToMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CopyAllocationToMemory(srcAllocation, srcAllocationLocalOffset, pDstHostPointer, size); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator allocator, + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + VMA_DEBUG_LOG("vmaBeginDefragmentation"); + + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & 
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) +{ + VMA_ASSERT(allocator && context); + + VMA_DEBUG_LOG("vmaEndDefragmentation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassBegin(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassEnd(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if 
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. 
+ VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 2a. Include minAlignment + vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + VmaBufferImageUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferUsage(*pBufferCreateInfo, allocator->m_UseKhrMaintenance5); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer2"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. 
+ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + VmaBufferImageUsage(*pImageCreateInfo), // dedicatedBufferImageUsage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind image with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitImageUsage(*pImageCreateInfo); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateAliasingImage2"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory.
+ res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) +{ + VMA_ASSERT(pCreateInfo && pVirtualBlock); + VMA_ASSERT(pCreateInfo->size > 0); + VMA_DEBUG_LOG("vmaCreateVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); + VkResult res = (*pVirtualBlock)->Init(); + if(res < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); + *pVirtualBlock = VK_NULL_HANDLE; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) +{ + if(virtualBlock != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, virtualBlock); + } +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); + VMA_DEBUG_LOG("vmaVirtualAllocate"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) +{ + if(allocation != VK_NULL_HANDLE) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaVirtualFree"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Free(allocation); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaClearVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Clear(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->SetAllocationUserData(allocation, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + VmaStringBuilder sb(allocationCallbacks); + virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); + *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE);
+        VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+        VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
+    }
+}
+#endif // VMA_STATS_STRING_ENABLED
+#endif // _VMA_PUBLIC_INTERFACE
+#endif // VMA_IMPLEMENTATION
+
+/**
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+While you can pull the entire repository, e.g. as a Git submodule, and use the provided CMake script,
+you don't need to build it as a separate library project.
+You can add the file "vk_mem_alloc.h" directly to your project and submit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+like it tends to be the case with inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+If you don't do it properly, it will result in linker errors.
+
+To do it properly:
+
+-# Include "vk_mem_alloc.h" in each CPP file where you want to use the library.
+   This includes declarations of all members of the library.
+-# In exactly one CPP file define the following macro before this include.
+   It also enables internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose, e.g. "VmaUsage.cpp".
+
+This library includes the header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+It may be a good idea to create a dedicated header file for this purpose, e.g. "VmaUsage.h",
+that will be included in other source files instead of the VMA header directly.
+
+This library is written in C++, but has a C-compatible interface.
+Thus, you can include and use "vk_mem_alloc.h" in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+Some features of C++14 are used and required. Features of C++20 are used optionally when available.
+Some headers of the standard C and C++ library are used, but STL containers, RTTI, and C++ exceptions are not.
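+
+As a concrete illustration of the two files suggested above, a minimal "VmaUsage.h" / "VmaUsage.cpp"
+pair might look as follows (a sketch; the platform macro shown is an example, not a requirement):
+
+\code
+// VmaUsage.h - include this instead of vk_mem_alloc.h everywhere in the project.
+#pragma once
+
+#ifdef _WIN32
+    #define WIN32_LEAN_AND_MEAN // Example macro that must be defined before <windows.h>.
+#endif
+
+#include "vk_mem_alloc.h"
+\endcode
+
+\code
+// VmaUsage.cpp - the single translation unit that compiles the implementation.
+#define VMA_IMPLEMENTATION
+#include "VmaUsage.h"
+\endcode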
+
+\section quick_start_initialization Initialization
+
+VMA offers a library interface in a style similar to Vulkan, with object handles like #VmaAllocation,
+structures describing parameters of objects to be created like #VmaAllocationCreateInfo,
+and error codes returned from functions using the `VkResult` type.
+
+The first and main object that needs to be created is #VmaAllocator.
+It represents the initialization of the entire library.
+Only one such object should be created per `VkDevice`.
+You should create it at program startup, after the `VkDevice` was created, and before any device memory allocation needs to be made.
+It must be destroyed before the `VkDevice` is destroyed.
+
+At program startup:
+
+-# Initialize Vulkan to have `VkInstance`, `VkPhysicalDevice`, `VkDevice` objects.
+-# Fill the VmaAllocatorCreateInfo structure and call vmaCreateAllocator() to create the #VmaAllocator object.
+
+Only the members `physicalDevice`, `device`, `instance` are required.
+However, you should inform the library which Vulkan version you use by setting
+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+by setting VmaAllocatorCreateInfo::flags.
+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
+See below for details.
+
+\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
+
+VMA supports Vulkan versions down to 1.0, for backward compatibility.
+If you want to use a higher version, you need to inform the library about it.
+This is a two-step process.
+
+Step 1: Compile time. By default, VMA compiles with code supporting the highest
+Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
+If this is OK, you don't need to do anything.
+However, if you want to compile VMA as if only some lower Vulkan version was available,
+define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
+It should have a decimal numeric value in the form ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
+For example, to compile against Vulkan 1.2:
+
+\code
+#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
+#include "vk_mem_alloc.h"
+\endcode
+
+Step 2: Runtime. Even when compiled with a higher Vulkan version available,
+VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
+By default, only Vulkan 1.0 is used.
+To initialize the allocator with support for a higher Vulkan version, you need to set the member
+VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
+See the code sample below.
+
+\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
+
+You may need to configure the way you import Vulkan functions. There are 3 ways to do this:
+
+-# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
+   - You don't need to do anything.
+   - VMA will use these, as the macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
+-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
+   `vkGetDeviceProcAddr` (this is the option presented in the example below):
+   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
+   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
+     VmaVulkanFunctions::vkGetDeviceProcAddr.
+   - The library will fetch pointers to all other functions it needs internally.
+-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
+   [Volk](https://github.com/zeux/volk):
+   - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
+   - Pass these pointers via the structure #VmaVulkanFunctions.
+
+\subsection quick_start_initialization_enabling_extensions Enabling extensions
+
+VMA can automatically use the following Vulkan extensions.
+If you found them available on the selected physical device and you enabled them
+while creating the `VkInstance` / `VkDevice` object, inform VMA about their availability
+by setting appropriate flags in VmaAllocatorCreateInfo::flags.
+
+Vulkan extension              | VMA flag
+------------------------------|-----------------------------------------------------
+VK_KHR_dedicated_allocation   | #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
+VK_KHR_bind_memory2           | #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
+VK_KHR_maintenance4           | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE4_BIT
+VK_KHR_maintenance5           | #VMA_ALLOCATOR_CREATE_KHR_MAINTENANCE5_BIT
+VK_EXT_memory_budget          | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
+VK_KHR_buffer_device_address  | #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
+VK_EXT_memory_priority        | #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+VK_AMD_device_coherent_memory | #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+
+Example with fetching pointers to Vulkan functions dynamically:
+
+\code
+#define VMA_STATIC_VULKAN_FUNCTIONS 0
+#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#include "vk_mem_alloc.h"
+
+...
+
+VmaVulkanFunctions vulkanFunctions = {};
+vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+
+VmaAllocatorCreateInfo allocatorCreateInfo = {};
+allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
+allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
+allocatorCreateInfo.physicalDevice = physicalDevice;
+allocatorCreateInfo.device = device;
+allocatorCreateInfo.instance = instance;
+allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+
+VmaAllocator allocator;
+vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+
+// Entire program...
+
+// At the end, don't forget to:
+vmaDestroyAllocator(allocator);
+\endcode
+
+
+\subsection quick_start_initialization_other_config Other configuration options
+
+There are additional configuration options available through preprocessor macros that you can define
+before including the VMA header, and through parameters passed in #VmaAllocatorCreateInfo.
+They include the possibility to use your own callbacks for host memory allocations (`VkAllocationCallbacks`),
+callbacks for device memory allocations (instead of `vkAllocateMemory`, `vkFreeMemory`),
+or your custom `VMA_ASSERT` macro, among others.
+For more information, see: @ref configuration.
+
+
+\section quick_start_resource_allocation Resource allocation
+
+When you want to create a buffer or image:
+
+-# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
+-# Fill the VmaAllocationCreateInfo structure.
+-# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory
+   already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+Don't forget to destroy your buffer and allocation objects when no longer needed:
+
+\code
+vmaDestroyBuffer(allocator, buffer, allocation);
+\endcode
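+
+The same pattern applies to images. A sketch with hypothetical parameters - a small sampled
+texture created and destroyed the same way as the buffer above:
+
+\code
+VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imageInfo.imageType = VK_IMAGE_TYPE_2D;
+imageInfo.extent = { 256, 256, 1 };
+imageInfo.mipLevels = 1;
+imageInfo.arrayLayers = 1;
+imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo imageAllocInfo = {};
+imageAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkImage image;
+VmaAllocation imageAllocation;
+vmaCreateImage(allocator, &imageInfo, &imageAllocInfo, &image, &imageAllocation, nullptr);
+
+// ...
+
+vmaDestroyImage(allocator, image, imageAllocation);
+\endcode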
+
+If you need to map the buffer, you must set the flag
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+in VmaAllocationCreateInfo::flags.
+There are many additional parameters that can control the choice of memory type to be used for the allocation
+and other features.
+For more information, see documentation chapters: @ref choosing_memory_type, @ref memory_mapping.
+
+
+\page choosing_memory_type Choosing memory type
+
+Physical devices in Vulkan support various combinations of memory heaps and
+types. Help with choosing the correct and optimal memory type for your specific
+resource is one of the key features of this library. You can use it by filling
+appropriate members of the VmaAllocationCreateInfo structure, as described below.
+You can also combine multiple methods.
+
+-# If you just want to find a memory type index that meets your requirements, you
+   can use one of the functions: vmaFindMemoryTypeIndexForBufferInfo(),
+   vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
+-# If you want to allocate a region of device memory without association with any
+   specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
+   this function is not recommended and usually not needed.
+   The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
+   which may be useful for sparse binding.
+-# If you already have a buffer or an image created, want to allocate memory
+   for it, and will then bind it yourself, you can use the functions
+   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage() (see the sketch below).
+   For binding you should use the functions vmaBindBufferMemory(), vmaBindImageMemory()
+   or their extended versions vmaBindBufferMemory2(), vmaBindImageMemory2().
+-# If you want to create a buffer or an image, allocate memory for it, and bind
+   them together, all in one call, you can use the functions vmaCreateBuffer(),
+   vmaCreateImage().
+   This is the easiest and recommended way to use this library!
+
+When using method 3 or 4, the library internally queries Vulkan for the memory types
+supported by that buffer or image (using a function like `vkGetBufferMemoryRequirements()`)
+and uses only one of these types.
+
+If no memory type can be found that meets all the requirements, these functions
+return `VK_ERROR_FEATURE_NOT_PRESENT`.
+
+You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
+It means no requirements are specified for the memory type.
+It is valid, although not very useful.
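+
+For illustration, the allocate-then-bind flow from method 3 above might look like this -
+a sketch, assuming `device`, `allocator`, and a filled `bufCreateInfo` already exist:
+
+\code
+// 1. Create the buffer yourself, without memory bound to it.
+VkBuffer buf;
+vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);
+
+// 2. Let VMA allocate memory suitable for this buffer.
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VmaAllocation alloc;
+VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
+// Check res...
+
+// 3. Bind the buffer to the allocated memory.
+res = vmaBindBufferMemory(allocator, alloc, buf);
+// Check res...
+\endcode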
+
+\section choosing_memory_type_usage Usage
+
+The easiest way to specify memory requirements is to fill the member
+VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
+It defines high-level, common usage types.
+Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
+
+For example, if you want to create a uniform buffer that will be filled using
+transfer only once or infrequently and then used for rendering every frame, you can
+do it using the following code. The buffer will most likely end up in a memory type with
+`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory,
+on systems with a discrete graphics card that has separate memories, you can use
+#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
+
+When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
+you also need to specify one of the host access flags:
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+This will help the library decide on the preferred memory type, ensuring it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+so you can map it.
+
+For example, a staging buffer that will be filled via a mapped pointer and then
+used as a source of transfer to the buffer described previously can be created like this.
+It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
+but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
+
+\code
+VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+stagingBufferInfo.size = 65536;
+stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo stagingAllocInfo = {};
+stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+
+VkBuffer stagingBuffer;
+VmaAllocation stagingAllocation;
+vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
+\endcode
+
+For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
+See also: @ref memory_mapping.
+
+Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
+about the resource being created, i.e. when `VkBufferCreateInfo` / `VkImageCreateInfo` is passed,
+so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
+If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting
+the memory type, as described below.
+
+\note
+Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
+`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
+are still available and work the same way as in previous versions of the library
+for backward compatibility, but they are deprecated.
+
+\section choosing_memory_type_required_preferred_flags Required and preferred flags
+
+You can specify more detailed requirements by filling the members
+VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
+with a combination of bits from the enum `VkMemoryPropertyFlags`.
+For example,
+if you want to create a buffer that will be persistently mapped on the host (so it
+must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
+use the following code:
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+A memory type is chosen that has all the required flags and as many preferred
+flags set as possible.
+
+The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
+plus some extra "magic" (heuristics).
+
+\section choosing_memory_type_explicit_memory_types Explicit memory types
+
+If you inspected the memory types available on the physical device and you have
+a preference for the memory types that you want to use, you can fill the member
+VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
+means that a memory type with that index is allowed to be used for the
+allocation. The special value 0, just like `UINT32_MAX`, means there are no
+restrictions on the memory type index.
+
+Please note that this member is NOT just a memory type index.
+Still, you can use it to choose just one specific memory type.
+For example, if you already determined that your buffer should be created in
+memory type 2, use the following code:
+
+\code
+uint32_t memoryTypeIndex = 2;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+You can also use this parameter to exclude some memory types.
+If you inspect the memory heaps and types available on the current physical device and
+you determine that for some reason you don't want to use a specific memory type for the allocation,
+you can enable automatic memory type selection but exclude a certain memory type or types
+by setting all bits of `memoryTypeBits` to 1 except the ones you choose.
+
+\code
+// ...
+uint32_t excludedMemoryTypeIndex = 2;
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocInfo.memoryTypeBits = ~(1u << excludedMemoryTypeIndex);
+// ...
+\endcode
+
+
+\section choosing_memory_type_custom_memory_pools Custom memory pools
+
+If you allocate from a custom memory pool, all the ways of specifying memory
+requirements described above are not applicable, and the aforementioned members
+of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
+explicitly when creating the pool and is then used to make all the allocations from
+that pool. For further details, see \ref custom_memory_pools.
+
+\section choosing_memory_type_dedicated_allocations Dedicated allocations
+
+Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
+allocated from Vulkan internally. That is the main feature of this whole library.
+You can still request a separate memory block to be created for an allocation,
+just like you would do in a trivial solution without using any allocator.
+In that case, a buffer or image is always bound to that memory at offset 0.
+This is called a "dedicated allocation".
+You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+as shown in the sketch below.
+The library can also internally decide to use a dedicated allocation in some cases, e.g.:
+
+- When the size of the allocation is large.
+- When the [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
+  and it reports that a dedicated allocation is required or recommended for the resource.
+- When allocation of the next big memory block fails due to insufficient device memory,
+  but an allocation of the exact requested size succeeds.
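+
+A sketch of requesting a dedicated allocation explicitly - for example for a render target,
+a common case where drivers tend to recommend a dedicated block anyway:
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
+// imgCreateInfo is a hypothetical, already-filled VkImageCreateInfo for the render target.
+VkImage image;
+VmaAllocation allocation;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, nullptr);
+\endcode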
+
+
+\page memory_mapping Memory mapping
+
+To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
+to be able to read from it or write to it in CPU code.
+Mapping is possible only for memory allocated from a memory type that has
+the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
+You can use them directly with memory allocated by this library,
+but it is not recommended because of the following issue:
+Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
+This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
+It is also not thread-safe.
+Because of this, Vulkan Memory Allocator provides the following facilities:
+
+\note If you want to be able to map an allocation, you need to specify one of the flags
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
+when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
+For other usage values they are ignored and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
+but these flags can still be used for consistency.
+
+\section memory_mapping_copy_functions Copy functions
+
+The easiest way to copy data from a host pointer to an allocation is to use the convenience function vmaCopyMemoryToAllocation().
+It automatically maps the Vulkan memory temporarily (if not already mapped), performs `memcpy`,
+and calls `vkFlushMappedMemoryRanges` (if required - i.e. if the memory type is not `HOST_COHERENT`).
+
+It is also the safest way, because using `memcpy` avoids the risk of accidentally introducing memory reads
+(e.g. by doing `pMappedVectors[i] += v`), which may be very slow on memory types that are not `HOST_CACHED`.
+
+\code
+struct ConstantBuffer
+{
+    ...
+};
+ConstantBuffer constantBufferData = ...
+
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+
+vmaCopyMemoryToAllocation(allocator, &constantBufferData, alloc, 0, sizeof(ConstantBuffer));
+\endcode
+
+A copy in the other direction - from an allocation to a host pointer - can be performed the same way using the function vmaCopyAllocationToMemory().
+
+\section memory_mapping_mapping_functions Mapping functions
+
+The library provides the following functions for mapping of a specific allocation: vmaMapMemory(), vmaUnmapMemory().
+They are safer and more convenient to use than the standard Vulkan functions.
+You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
+You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
+The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
+For further details, see the description of the vmaMapMemory() function.
+Example:
+
+\code
+// Having these objects initialized:
+struct ConstantBuffer
+{
+    ...
+};
+ConstantBuffer constantBufferData = ...
+
+VmaAllocator allocator = ...
+VkBuffer constantBuffer = ...
+VmaAllocation constantBufferAllocation = ...
+
+// You can map and fill your buffer using the following code:
+
+void* mappedData;
+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, constantBufferAllocation);
+\endcode
+
+When mapping, you may see a warning from the Vulkan validation layer similar to this one:
+
+<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
+
+It happens because the library maps the entire `VkDeviceMemory` block, where different
+types of images and buffers may end up together, especially on GPUs with unified memory like Intel's.
+You can safely ignore it if you are sure you access only memory of the intended
+object that you wanted to map.
+
+
+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+
+Keeping your memory persistently mapped is generally OK in Vulkan.
+You don't need to unmap it before using its data on the GPU.
+The library provides a special feature designed for that:
+Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+VmaAllocationCreateInfo::flags stay mapped all the time,
+so you can just access the CPU pointer to it at any time,
+without needing to call any "map" or "unmap" function.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+// Buffer is already mapped. You can access its memory.
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
+in a mappable memory type.
+For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
+For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** the cache before reading from a mapped pointer
+and **flush** the cache after writing to a mapped pointer.
+Map/unmap operations don't do that automatically.
+Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to a given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+
+Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
+currently provide the `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on PC you may not need to bother.
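+
+A sketch of the typical write-then-flush sequence, assuming `alloc` lives in a `HOST_VISIBLE`
+memory type that may not be `HOST_COHERENT` (`srcData` and `dataSize` are placeholders):
+
+\code
+void* mappedData;
+vmaMapMemory(allocator, alloc, &mappedData);
+memcpy(mappedData, &srcData, dataSize);
+// Flush the written range. The offset/size are relative to the allocation;
+// the library extends them to nonCoherentAtomSize as needed, and the call is
+// a cheap no-op if the memory type is HOST_COHERENT.
+vmaFlushAllocation(allocator, alloc, 0, dataSize);
+vmaUnmapMemory(allocator, alloc);
+\endcode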
+
+
+\page staying_within_budget Staying within budget
+
+When developing a graphics-intensive game or program, it is important to avoid allocating
+more GPU memory than is physically available. When the memory is over-committed,
+various bad things can happen, depending on the specific GPU, graphics driver, and
+operating system:
+
+- It may just work without any problems.
+- The application may slow down because some memory blocks are moved to system RAM
+  and the GPU has to access them through the PCI Express bus.
+- A new allocation may take a very long time to complete, even a few seconds, and possibly
+  freeze the entire system.
+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+  returned somewhere later.
+
+\section staying_within_budget_querying_for_budget Querying for budget
+
+To query for current memory usage and available budget, use the function vmaGetHeapBudgets().
+The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
+
+Please note that this function returns different information and works faster than
+vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
+allocation, while vmaCalculateStatistics() is intended to be used rarely,
+only to obtain statistical information, e.g. for debugging purposes.
+
+It is recommended to use the VK_EXT_memory_budget device extension to obtain information
+about the budget from the Vulkan device. VMA is able to use this extension automatically.
+When not enabled, the allocator behaves the same way, but then it estimates current usage
+and available budget based on its internal information and Vulkan memory heap sizes,
+which may be less precise. In order to use this extension:
+
+1. Make sure the extension VK_EXT_memory_budget and the extension VK_KHR_get_physical_device_properties2
+   required by it are available, and enable them. Please note that the first is a device
+   extension and the second is an instance extension!
+2. Use the flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
+3. Make sure to call vmaSetCurrentFrameIndex() every frame. The budget is queried from
+   Vulkan inside that call to avoid the overhead of querying it with every allocation
+   (see the sketch below).
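+
+A sketch of a per-frame budget check, assuming `physicalDevice` is available and `frameIndex`
+is a hypothetical frame counter:
+
+\code
+vmaSetCurrentFrameIndex(allocator, frameIndex);
+
+VkPhysicalDeviceMemoryProperties memProps;
+vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+for(uint32_t heapIndex = 0; heapIndex < memProps.memoryHeapCount; ++heapIndex)
+{
+    // budgets[heapIndex].usage is what this process currently uses from the heap;
+    // budgets[heapIndex].budget is an estimate of how much it can use safely.
+    if(budgets[heapIndex].usage > budgets[heapIndex].budget)
+    {
+        // Over budget on this heap - consider releasing or streaming out resources.
+    }
+}
+\endcode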
+
+\section staying_within_budget_controlling_memory_usage Controlling memory usage
+
+There are many ways in which you can try to stay within the budget.
+
+First, when making a new allocation requires allocating a new memory block, the library
+tries not to exceed the budget automatically. If a block with the default recommended size
+(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
+dedicated memory for just this resource.
+
+If the size of the requested resource plus current memory usage is more than the
+budget, by default the library still tries to create it, leaving it to the Vulkan
+implementation whether the allocation succeeds or fails. You can change this behavior
+by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+VMA then tries to make the allocation from the next eligible Vulkan memory type.
+If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior,
+which allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
+whether it should fail with an error code or still allow the allocation.
+Usage of this extension involves only passing an extra structure on Vulkan device creation,
+so it is out of the scope of this library.
+
+Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will currently try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate the parameters of the allocation to be made using the formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+The following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate if(finalMemReq.memoryTypeBits != 0)
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+VMA also provides convenience functions that create a buffer or image and bind it to memory
+represented by an existing #VmaAllocation:
+vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
+vmaCreateAliasingImage(), vmaCreateAliasingImage2().
+Versions with "2" offer the additional parameter `allocationLocalOffset`.
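+
+Using one of these convenience functions, the manual create + bind of `img2` above could be
+replaced by a single call - a sketch, reusing `alloc` and `img2CreateInfo` from the example:
+
+\code
+VkImage img2;
+res = vmaCreateAliasingImage(allocator, alloc, &img2CreateInfo, &img2);
+// ...
+vkDestroyImage(device, img2, nullptr); // The VmaAllocation itself stays alive.
+\endcode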
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on the GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+  See chapter 11.8 "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create a more complex layout where different images and buffers are bound
+  at different offsets inside one large allocation. For example, one can imagine
+  a big texture used in some render passes, aliasing with a set of many small buffers
+  used between them in some further passes. To bind a resource at a non-zero offset in an allocation,
+  use vmaBindBufferMemory2() / vmaBindImageMemory2().
+- Before allocating memory for the resources you want to alias, check the `memoryTypeBits`
+  returned in the memory requirements of each resource to make sure the bits overlap.
+  Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+  images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+  resources may be disjoint. Aliasing them is not possible in that case.
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+Default memory pools automatically grow in size.
+The size of allocated blocks is also variable and managed automatically.
+You are using the default pools whenever you leave VmaAllocationCreateInfo::pool = null.
+
+You can create a custom pool and allocate memory out of it.
+It can be useful if you want to:
+
+- Keep a certain kind of allocations separate from others.
+- Enforce a particular, fixed size of Vulkan memory blocks.
+- Limit the maximum amount of Vulkan memory allocated for that pool.
+- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
+  #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
+- Perform defragmentation on a specific subset of your allocations.
+
+To use custom memory pools:
+
+-# Fill the VmaPoolCreateInfo structure.
+-# Call vmaCreatePool() to obtain the #VmaPool handle.
+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+   You don't need to specify any other parameters of this structure, like `usage`.
+
+Example:
+
+\code
+// Find memoryTypeIndex for the pool.
+VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
+sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo sampleAllocCreateInfo = {};
+sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
+    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+// Check res...
+
+// Create a pool that can have at most 2 blocks, 128 MiB each.
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+// Check res...
+
+// Allocate a buffer out of it.
+
+
+\section custom_memory_pools_MemTypeIndex Choosing memory type index
+
+When creating a pool, you must explicitly specify the memory type index.
+To find the one suitable for your buffers or images, you can use helper functions
+vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
+You need to provide structures with example parameters of buffers or images
+that you are going to create in that pool.
+
+\code
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 1024; // Doesn't matter
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// ...
+\endcode
+
+When creating buffers/images allocated in that pool, provide the following parameters:
+
+- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+  or the other way around.
+- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+  Other members are ignored anyway.
+
+
+\section custom_memory_pools_when_not_use When not to use custom pools
+
+Custom pools are commonly overused by VMA users.
+While it may feel natural to keep some logical groups of resources separate in memory,
+in most cases it does more harm than good.
+Using a custom pool shouldn't be your first choice.
+Instead, please make all allocations from default pools first and only use custom pools
+if you can prove and measure that it is beneficial in some way,
+e.g. it results in lower memory usage, better performance, etc.
+
+Using custom pools has disadvantages:
+
+- Each pool has its own collection of `VkDeviceMemory` blocks.
+  Some of them may be partially or even completely empty.
+  Spreading allocations across multiple pools increases the amount of wasted (allocated but unbound) memory.
+- You must manually choose a specific memory type to be used by a custom pool (set as VmaPoolCreateInfo::memoryTypeIndex).
+  When using default pools, the best memory type for each of your allocations can be selected automatically
+  using a carefully designed algorithm that works across all kinds of GPUs.
+- If an allocation from a custom pool at a specific memory type fails, the entire allocation operation returns failure.
+  When using default pools, VMA tries another compatible memory type.
+- If you set VmaPoolCreateInfo::blockSize != 0, each memory block has the same size,
+  while default pools start from small blocks and allocate each next block larger and larger,
+  up to the preferred block size.
+
+Many of the common concerns can be addressed in a different way than using custom pools:
+
+- If you want to keep your allocations of a certain size (small versus large) or a certain lifetime (transient versus long-lived)
+  separate, you likely don't need to.
+  VMA uses a high-quality allocation algorithm that manages memory well in various cases.
+  Please measure and check if using custom pools provides a benefit.
+- If you want to keep your images and buffers separate, you don't need to.
+  VMA respects the `bufferImageGranularity` limit automatically.
+- If you want to keep your mapped and not mapped allocations separate, you don't need to.
+  VMA respects the `nonCoherentAtomSize` limit automatically.
+  It also maps only those `VkDeviceMemory` blocks that need to map any allocation.
+  It even tries to keep mappable and non-mappable allocations in separate blocks to minimize the amount of mapped memory.
+- If you want to choose a custom size for the default memory block, you can set it globally instead
+  using VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.
+- If you want to select a specific memory type for your allocation,
+  you can set VmaAllocationCreateInfo::memoryTypeBits to `(1u << myMemoryTypeIndex)` instead.
+- If you need to create a buffer with a certain minimum alignment, you can still do it
+  using default pools with the dedicated function vmaCreateBufferWithAlignment().
+
+
+\section linear_algorithm Linear allocation algorithm
+
+Each Vulkan memory block managed by this library has accompanying metadata that
+keeps track of used and unused regions. By default, the metadata structure and
+algorithm tries to find the best place for new allocations among free regions to
+optimize memory usage. This way you can allocate and free objects in any order.
+
+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+
+Sometimes there is a need to use a simpler, linear allocation algorithm. You can
+create a custom pool that uses this algorithm by adding the flag
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+the #VmaPool object. Then an alternative metadata management is used. It always
+creates new allocations after the last one and doesn't reuse free regions left after
+allocations freed in the middle. This results in better allocation performance and
+less memory consumed by metadata.
+
+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+
+With this one flag, you can create a custom pool that can be used in many ways:
+free-at-once, stack, double stack, and ring buffer. See below for details.
+You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
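+
+For reference, a minimal sketch of creating such a pool (this assumes `memTypeIndex`
+was chosen as described in \ref custom_memory_pools_MemTypeIndex; the block size is arbitrary):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+poolCreateInfo.blockSize = 64ull * 1024 * 1024; // E.g. one fixed 64 MiB block.
+poolCreateInfo.maxBlockCount = 1; // Required for the double stack and ring buffer usages below.
+
+VmaPool pool;
+VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+// Check res...
+\endcode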
+
+\subsection linear_algorithm_free_at_once Free-at-once
+
+In a pool that uses the linear algorithm, you still need to free all the allocations
+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+them in any order. New allocations are always made after the last one - free space
+in the middle is not reused. However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use the linear algorithm to speed up the creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In First Out), you can achieve the behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with the linear algorithm may be used by two
+stacks:
+
+- The first, default one, growing up from offset 0.
+- The second, "upper" one, growing down from the end towards lower offsets.
+
+To make an allocation from the upper stack, add the flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags.
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+Double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
+
+When the two stacks' ends meet so there is not enough space between them for a
+new allocation, such an allocation fails with the usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of a pool, the allocator's "cursor" wraps around to the
+beginning and starts allocating there. Thanks to this, if you always release
+allocations in the same order as you created them (FIFO - First In First Out),
+you can achieve the behavior of a ring buffer / queue.
+
+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+
+Ring buffer is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
+
+\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a contiguous range of free memory for a new allocation even though there is
+enough free space, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature.
+It doesn't happen automatically though and needs your cooperation,
+because VMA is a low-level library that only allocates memory.
+It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
+It cannot copy their contents as it doesn't record any commands to a command buffer.
+
+Example:
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+defragInfo.pool = myPool;
+defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
+
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+// Check res...
+
+for(;;)
+{
+    VmaDefragmentationPassMoveInfo pass;
+    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+    else if(res != VK_INCOMPLETE)
+    {
+        // Handle error...
+    }
+
+    for(uint32_t i = 0; i < pass.moveCount; ++i)
+    {
+        // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+
+        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
+        VkImageCreateInfo imgCreateInfo = ...
+        VkImage newImg;
+        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
+        // Check res...
+        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
+        // Check res...
+
+        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
+        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
+    }
+
+    // Make sure the copy commands finished executing.
+    vkWaitForFences(...);
+
+    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
+    for(uint32_t i = 0; i < pass.moveCount; ++i)
+    {
+        // ...
+        vkDestroyImage(device, resData->img, nullptr);
+    }
+
+    // Update appropriate descriptors to point to the new places...
+
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+    else if(res != VK_INCOMPLETE)
+    {
+        // Handle error...
+    }
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode
+
+Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
+create/destroy an allocation and a buffer/image at once, these are just a shortcut for
+creating the resource, allocating memory, and binding them together.
+Defragmentation works on memory allocations only. You must handle the rest manually.
+Defragmentation is an iterative process that should repeat "passes" as long as the related functions
+return `VK_INCOMPLETE`, not `VK_SUCCESS`.
+In each pass:
+
+1. vmaBeginDefragmentationPass() function call:
+   - Calculates and returns the list of allocations to be moved in this pass.
+     Note this can be a time-consuming process.
+   - Reserves destination memory for them by creating temporary destination allocations
+     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
+2. Inside the pass, **you should**:
+   - Inspect the returned list of allocations to be moved.
+   - Create new buffers/images and bind them at the returned destination temporary allocations.
+   - Copy data from source to destination resources if necessary.
+   - Destroy the source buffers/images, but NOT their allocations.
+3. vmaEndDefragmentationPass() function call:
+   - Frees the source memory reserved for the allocations that are moved.
+   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
+   - Frees `VkDeviceMemory` blocks that became empty.
+
+Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
+The defragmentation algorithm tries to move all suitable allocations.
+You can, however, refuse to move some of them inside a defragmentation pass, by setting
+`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
+If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
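+
+For example, a pass could veto individual moves like this (a sketch; `CanBeMoved()` is
+a hypothetical engine-side predicate):
+
+\code
+for(uint32_t i = 0; i < pass.moveCount; ++i)
+{
+    if(!CanBeMoved(pass.pMoves[i].srcAllocation)) // Hypothetical engine-side check.
+        pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
+}
+\endcode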
+
+Inside a pass, for each allocation that should be moved:
+
+- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
+  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
+- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
+  filled, and used temporarily in each rendering frame, you can just recreate this image
+  without copying its data.
+- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
+  using `memcpy()`.
+- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+  This will cancel the move.
+  - vmaEndDefragmentationPass() will then free the destination memory, not the source memory of the allocation, leaving it unchanged.
+- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for a long time),
+  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
+
+You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
+(like in the example above) or all the default pools by setting this member to null.
+
+Defragmentation is always performed in each pool separately.
+Allocations are never moved between different Vulkan memory types.
+The size of the destination memory reserved for a moved allocation is the same as the original one.
+The alignment of an allocation, as determined using `vkGetBufferMemoryRequirements()` etc., is also respected after defragmentation.
+Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
+
+You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
+in each pass, e.g. to run it in sync with render frames and avoid big hitches.
+See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
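+
+For instance, to cap the work done per pass (a sketch; the exact limits are arbitrary):
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+defragInfo.pool = myPool;
+defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
+defragInfo.maxAllocationsPerPass = 64; // Move at most 64 allocations per pass.
+\endcode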
+
+It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
+usage, possibly from multiple threads, with the exception that allocations
+returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
+
+Mapping is preserved on allocations that are moved during defragmentation.
+Whether mapped through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
+are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried again
+using VmaAllocationInfo::pMappedData.
+
+\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+
+
+\page statistics Statistics
+
+This library contains several functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+
+\section statistics_numeric_statistics Numeric statistics
+
+If you need to obtain basic statistics about memory usage per heap, together with the current budget,
+you can call the function vmaGetHeapBudgets() and inspect the structure #VmaBudget.
+This is useful to keep track of memory usage and stay within budget
+(see also \ref staying_within_budget).
+Example:
+
+\code
+uint32_t heapIndex = ...
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+printf("My heap currently has %u allocations taking %llu B,\n",
+    budgets[heapIndex].statistics.allocationCount,
+    budgets[heapIndex].statistics.allocationBytes);
+printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
+    budgets[heapIndex].statistics.blockCount,
+    budgets[heapIndex].statistics.blockBytes);
+printf("Vulkan reports total usage %llu B with budget %llu B.\n",
+    budgets[heapIndex].usage,
+    budgets[heapIndex].budget);
+\endcode
+
+You can query for more detailed statistics per memory heap, type, and totals,
+including minimum and maximum allocation size and unused range size,
+by calling the function vmaCalculateStatistics() and inspecting the structure #VmaTotalStatistics.
+This function is slower though, as it has to traverse all the internal data structures,
+so it should be used only for debugging purposes.
+
+You can query for statistics of a custom pool using the functions vmaGetPoolStatistics()
+or vmaCalculatePoolStatistics().
+
+You can query for information about a specific allocation using the function vmaGetAllocationInfo().
+It fills the structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump the internal state of the allocator to a string in JSON format using the function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by the user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using that encoding.
+The string must be freed using the function vmaFreeStatsString().
+
+The format of this JSON string is not part of the official documentation of the library,
+but it will not change in a backward-incompatible way without an increase of the library's major version number
+and an appropriate mention in the changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
+It can also contain a detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows you, for example, to visualize the memory or assess fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number or any other value that would associate
+the allocation with your custom metadata.
+It is useful to identify appropriate data structures in your engine given a #VmaAllocation,
+e.g. when doing \ref defragmentation.
+
+\code
+VkBufferCreateInfo bufCreateInfo = ...
+
+MyBufferMetadata* pMetadata = CreateBufferMetadata();
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.pUserData = pMetadata;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+\endcode
+
+The pointer may be later retrieved as VmaAllocationInfo::pUserData:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
+\endcode
+
+It can also be changed using the function vmaSetAllocationUserData().
+
+Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
+vmaBuildStatsString() in hexadecimal form.
+
+\section allocation_names Allocation names
+
+An allocation can also carry a null-terminated string, giving a name to the allocation.
+To set it, call vmaSetAllocationName().
+The library creates an internal copy of the string, so the pointer you pass doesn't need
+to be valid for the whole lifetime of the allocation. You can free it after the call.
+
+\code
+std::string imageName = "Texture: ";
+imageName += fileName;
+vmaSetAllocationName(allocator, allocation, imageName.c_str());
+\endcode
+
+The string can be later retrieved by inspecting VmaAllocationInfo::pName.
+It is also printed in the JSON report created by vmaBuildStatsString().
+
+\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+
+
+\page virtual_allocator Virtual allocator
+
+As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of a "virtual allocator".
+It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
+You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
+A common use case is sub-allocation of pieces of one large GPU buffer.
+
+\section virtual_allocator_creating_virtual_block Creating virtual block
+
+To use this functionality, there is no main "allocator" object.
+You don't need to have a #VmaAllocator object created.
+All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
+
+-# Fill in the #VmaVirtualBlockCreateInfo structure.
+-# Call vmaCreateVirtualBlock(). Get the new #VmaVirtualBlock object.
+
+Example:
+
+\code
+VmaVirtualBlockCreateInfo blockCreateInfo = {};
+blockCreateInfo.size = 1048576; // 1 MB
+
+VmaVirtualBlock block;
+VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+\endcode
+
+\section virtual_allocator_making_virtual_allocations Making virtual allocations
+
+A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
+using the same code as the main Vulkan memory allocator.
+Similarly to #VmaAllocation for standard GPU allocations, there is the #VmaVirtualAllocation type
+that represents an opaque handle to an allocation within the virtual block.
+
+In order to make such an allocation:
+
+-# Fill in the #VmaVirtualAllocationCreateInfo structure.
+-# Call vmaVirtualAllocate(). Get the new #VmaVirtualAllocation object that represents the allocation.
+   You can also receive the `VkDeviceSize offset` that was assigned to the allocation.
+
+Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+
+VmaVirtualAllocation alloc;
+VkDeviceSize offset;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+if(res == VK_SUCCESS)
+{
+    // Use the 4 KB of your memory starting at offset.
+}
+else
+{
+    // Allocation failed - no space for it could be found. Handle this error!
+}
+\endcode
+
+\section virtual_allocator_deallocation Deallocation
+
+When no longer needed, an allocation can be freed by calling vmaVirtualFree().
+You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
+called for the same #VmaVirtualBlock.
+
+When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
+All allocations must be freed before the block is destroyed, which is checked internally by an assert.
+However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
+a feature not available in the normal Vulkan memory allocator. Example:
+
+\code
+vmaVirtualFree(block, alloc);
+vmaDestroyVirtualBlock(block);
+\endcode
+
+\section virtual_allocator_allocation_parameters Allocation parameters
+
+You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
+Its default value is null.
+It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
+larger data structure containing more information. Example:
+
+\code
+struct CustomAllocData
+{
+    std::string m_AllocName;
+};
+CustomAllocData* allocData = new CustomAllocData();
+allocData->m_AllocName = "My allocation 1";
+vmaSetVirtualAllocationUserData(block, alloc, allocData);
+\endcode
+
+The pointer can later be fetched, along with the allocation offset and size, by passing the allocation handle to the function
+vmaGetVirtualAllocationInfo() and inspecting the returned structure #VmaVirtualAllocationInfo.
+If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
+Example:
+
+\code
+VmaVirtualAllocationInfo allocInfo;
+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
+delete (CustomAllocData*)allocInfo.pUserData;
+
+vmaVirtualFree(block, alloc);
+\endcode
+
+\section virtual_allocator_alignment_and_units Alignment and units
+
+It feels natural to express sizes and offsets in bytes.
+If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
+VmaVirtualAllocationCreateInfo::alignment to request it. Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
+
+VmaVirtualAllocation alloc;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
+\endcode
+
+Alignments of different allocations made from one block may vary.
+However, if all alignments and sizes are always a multiple of some basic unit, e.g. 4 B or `sizeof(MyDataStruct)`,
+you can express all sizes, alignments, and offsets in multiples of that unit instead of individual bytes.
+It might be more convenient, but you need to make sure you use this new unit consistently in all the places:
+
+- VmaVirtualBlockCreateInfo::size
+- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
+- The offset returned by vmaVirtualAllocate() or stored in VmaVirtualAllocationInfo::offset
+
+\section virtual_allocator_statistics Statistics
+
+You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
+(to get brief statistics that are fast to calculate)
+or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
+The functions fill the structures #VmaStatistics and #VmaDetailedStatistics, respectively - the same as used by the normal Vulkan memory allocator.
+Example:
+
+\code
+VmaStatistics stats;
+vmaGetVirtualBlockStatistics(block, &stats);
+printf("My virtual block has %llu bytes used by %u virtual allocations\n",
+    stats.allocationBytes, stats.allocationCount);
+\endcode
+
+You can also request a full list of allocations and free regions as a string in JSON format by calling
+vmaBuildVirtualBlockStatsString().
+The returned string must later be freed using vmaFreeVirtualBlockStatsString().
+The format of this string differs from the one returned by the main Vulkan memory allocator, but it is similar.
+
+\section virtual_allocator_additional_considerations Additional considerations
+
+The "virtual allocator" functionality is implemented on the level of individual memory blocks.
+Keeping track of a whole collection of blocks, allocating new ones when out of free space,
+deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
+
+Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
+See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
+You can find their description in chapter \ref custom_memory_pools.
+Allocation strategies are also supported.
+See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
+
+The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
+buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
+
+
+\page debugging_memory_usage Debugging incorrect memory usage
+
+If you suspect a bug with memory usage, like usage of uninitialized memory or
+memory being overwritten out of bounds of an allocation,
+you can use debug features of this library to verify this.
+
+\section debugging_memory_usage_initialization Memory initialization
+
+If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
+you can enable automatic memory initialization to verify this.
+To do it, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+This makes the memory of new allocations initialized to the bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
+It also works with dedicated allocations.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. It is not 100% certain though.
+The change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+Margins work with all types of memory.
+
+The margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of a specific size.
+It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or those automatically promoted to dedicated allocations, e.g. due to their
+large size or as recommended by the VK_KHR_dedicated_allocation extension.
+
+Margins appear in the [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+Margins do not apply to \ref virtual_allocator.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define the macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of the contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+This clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking margins of all allocations in all memory blocks
+that belong to specified memory types by using the function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool, by using the function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\section debugging_memory_usage_leak_detection Leak detection features
+
+At allocation and allocator destruction time VMA checks for unfreed and unmapped blocks using
+`VMA_ASSERT_LEAK()`. This macro defaults to an assertion, triggering a typically fatal error in Debug
+builds, and doing nothing in Release builds. You can provide your own definition of `VMA_ASSERT_LEAK()`
+to change this behavior.
+
+At memory block destruction time VMA lists out all unfreed allocations using the `VMA_LEAK_LOG_FORMAT()`
+macro, which defaults to `VMA_DEBUG_LOG_FORMAT`, which in turn defaults to a no-op.
+If you're having trouble with leaks - for example, the aforementioned assertion triggers, but you don't
+quite know \em why - overriding this macro to print out the leaking blocks, combined with assigning
+individual names to allocations using vmaSetAllocationName(), can greatly aid in fixing them.
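+
+For example, a minimal sketch of routing the leak reports to `stderr` (an assumed override
+pattern; define the macro before including the library, following the same convention as the
+other debug macros above):
+
+\code
+#include <cstdio>
+
+#define VMA_LEAK_LOG_FORMAT(format, ...) \
+    fprintf(stderr, format "\n", __VA_ARGS__)
+#include "vk_mem_alloc.h"
+\endcode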
+
+\page other_api_interop Interop with other graphics APIs
+
+VMA provides some features that help with interoperability with other graphics APIs, e.g. OpenGL.
+
+\section opengl_interop_exporting_memory Exporting memory
+
+If you want to attach `VkExportMemoryAllocateInfoKHR` or another structure to the `pNext` chain of memory allocations made by the library:
+
+You can create \ref custom_memory_pools for such allocations.
+Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
+while creating the custom pool.
+Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
+not only while creating it, as no copy of the structure is made,
+but its original pointer is used for each allocation instead.
+
+If you want to export all memory allocated by VMA from certain memory types,
+including dedicated allocations and other allocations made from default pools,
+an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
+It should point to an array of `VkExternalMemoryHandleTypeFlagsKHR` values to be automatically passed by the library
+through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
+Please note that new versions of the library also support dedicated allocations created in custom pools.
+
+You should not mix these two methods in a way that would apply both to the same memory type.
+Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
+
+
+\section opengl_interop_custom_alignment Custom alignment
+
+Buffers or images exported to a different API like OpenGL may require a different alignment,
+higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
+To impose such alignment:
+
+You can create \ref custom_memory_pools for such allocations.
+Set the VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
+to be made out of this pool.
+The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
+from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
+
+If you want to create a buffer with a specific minimum alignment out of default pools,
+use the special function vmaCreateBufferWithAlignment(), which takes the additional parameter `minAlignment`.
+
+Note that the problem of alignment affects only resources placed inside larger `VkDeviceMemory` blocks and not dedicated
+allocations, as the latter, by definition, always bind the resource to the beginning of their dedicated block.
+You can ensure that an allocation is created as dedicated by using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
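+
+Putting the pool-based approach together, a minimal sketch (this assumes `memTypeIndex`
+was found as described in \ref custom_memory_pools_MemTypeIndex and that the external API
+requires 4 KiB alignment):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.minAllocationAlignment = 4096; // Alignment required by the external API.
+
+VmaPool pool;
+VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+// Check res...
+\endcode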
+
+\section opengl_interop_extended_allocation_information Extended allocation information
+
+If you want to rely on VMA to allocate your buffers and images inside larger memory blocks,
+but you need to know the size of the entire block and whether the allocation was made
+with its own dedicated memory, use the function vmaGetAllocationInfo2() to retrieve
+extended allocation information in the structure #VmaAllocationInfo2.
+
+
+
+\page usage_patterns Recommended usage patterns
+
+Vulkan gives great flexibility in memory allocation.
+This chapter shows the most common patterns.
+
+See also slides from talk:
+[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
+
+
+\section usage_patterns_gpu_only GPU-only resource
+
+When:
+Any resources that you frequently write and read on the GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
+
+What to do:
+Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+allocCreateInfo.priority = 1.0f;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+Also consider:
+Creating such resources as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them with different sizes,
+e.g. when the display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
+to decrease the chances of them being evicted to system memory by the operating system.
+
+\section usage_patterns_staging_copy_upload Staging copy for upload
+
+When:
+A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
+to some GPU resource.
+
+What to do:
+Use the flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+memcpy(allocInfo.pMappedData, myData, myDataSize);
+\endcode
+
+Also consider:
+You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
+
+
+\section usage_patterns_readback Readback
+
+When:
+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
+e.g. results of some computations.
+
+What to do:
+Use the flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+const float* downloadedData = (const float*)allocInfo.pMappedData;
+\endcode
+
+
+\section usage_patterns_advanced_data_uploading Advanced data uploading
+
+For resources that you frequently write on the CPU via a mapped pointer and
+frequently read on the GPU, e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
+
+-# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
+   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
+   and make the device reach out to that resource directly.
+   - Reads performed by the device will then go through the PCI Express bus.
+     The performance of this access may be limited, but it may be fine depending on the size
+     of this resource (whether it is small enough to quickly end up in the GPU cache) and the sparsity
+     of access.
+-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
+   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
+   (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
+-# Systems with a discrete graphics card and separate video memory may or may not expose
+   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
+   If they do, it represents a piece of VRAM (or the entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+   that is available to the CPU for mapping.
+   - Writes performed by the host to that memory go through the PCI Express bus.
+     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+     as long as the rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
+   a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
+
+Thankfully, VMA offers an aid to create and use such resources in the way optimal
+for the current Vulkan device. To help the library make the best choice,
+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+but if no such memory type is available or allocation from it fails
+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+so you need to create another "staging" allocation and perform explicit transfers.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+
+if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+{
+    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
+
+    // [Executed in runtime]:
+    memcpy(allocInfo.pMappedData, myData, myDataSize);
+}
+else
+{
+    // Allocation ended up in a non-mappable memory - need to transfer.
+    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    stagingBufCreateInfo.size = 65536;
+    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer stagingBuf;
+    VmaAllocation stagingAlloc;
+    VmaAllocationInfo stagingAllocInfo;
+    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
+        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+
+    // [Executed in runtime]:
+    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+    vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
+    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
+    VkBufferCopy bufCopy = {
+        0, // srcOffset
+        0, // dstOffset
+        myDataSize }; // size
+    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
+}
+\endcode
+
+\section usage_patterns_other_use_cases Other use cases
+
+Here are some other, less obvious use cases and their recommended settings:
+
+- An image that is used only as transfer source and destination, but it should stay on the device,
+  as it is used to temporarily store a copy of some texture, e.g.
+  from the current to the next frame, for temporal antialiasing or other temporal effects.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
+- An image that is used only as transfer source and destination, but it should be placed
+  in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
+  least recently used textures from VRAM.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
+    as VMA needs a hint here to differentiate from the previous case.
+- A buffer that you want to map and write from the CPU, and directly read from the GPU
+  (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
+  host memory due to its large size.
+  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
+  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
+
+
+\page configuration Configuration
+
+Please check "CONFIGURATION SECTION" in the code to find macros that you can define
+before each include of this file or change directly in this file to provide
+your own implementation of basic facilities like assert, `min()` and `max()` functions,
+mutex, atomics, etc.
+The library uses its own implementation of containers by default, but you can switch to using
+STL containers instead.
+
+For example, define `VMA_ASSERT(expr)` before including the library to provide
+a custom implementation of the assertion, compatible with your project.
+By default, it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
+and is empty otherwise.
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+There are multiple ways to import pointers to Vulkan functions in the library.
+In the simplest case you don't need to do anything.
+If the compilation or linking of your program or the initialization of the #VmaAllocator
+doesn't work for you, you can try to reconfigure it.
+
+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+like this:
+
+\code
+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+\endcode
+
+If you want to disable this feature, set the configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+
+Second, you can provide the pointers yourself by setting the member VmaAllocatorCreateInfo::pVulkanFunctions.
+You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+by using a helper library like [volk](https://github.com/zeux/volk).
+
+Third, VMA tries to fetch remaining pointers that are still null by calling
+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+Other pointers will be fetched automatically.
+If you want to disable this feature, set the configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
+
+Finally, all the function pointers required by the library (considering the selected
+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to confirm they are not null.
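+
+For example, a minimal sketch of the third approach (`instance`, `physicalDevice`, and
+`device` are assumed to be your already created Vulkan handles):
+
+\code
+VmaVulkanFunctions vulkanFunctions = {};
+vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+
+VmaAllocatorCreateInfo allocatorCreateInfo = {};
+allocatorCreateInfo.instance = instance;
+allocatorCreateInfo.physicalDevice = physicalDevice;
+allocatorCreateInfo.device = device;
+allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+
+VmaAllocator allocator;
+VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+// Check res...
+\endcode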
+
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than the default `new` and `delete`
+operators from C++, you can make this library use your allocator as well
+by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill the optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of a certain heap runs out of free space, new allocations may
+fail (returning an error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on the GPU vendor and the graphics
+driver.
+
+On AMD cards it can be controlled while creating the Vulkan device object by using the
+VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available, without switching your graphics card to one that really has
+a smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
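+
+For example, a sketch that limits heap 0 to 1 GiB (`VK_WHOLE_SIZE` in an entry of the
+array means no limit on that heap):
+
+\code
+VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
+for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+    heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit on this heap.
+heapSizeLimit[0] = 1ull * 1024 * 1024 * 1024; // Limit heap 0 to 1 GiB.
+
+VmaAllocatorCreateInfo allocatorCreateInfo = {};
+// ... fill other members as usual ...
+allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
+\endcode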
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
+
+It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
+
+Otherwise, if you want to use it as an extension:
+
+1. When creating the Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2. Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled the required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That is all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or an image using vmaCreateImage().
+
+When using the extension together with the Vulkan Validation Layer, you will receive
+warnings like this:
+
+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+
+It is OK - you should just ignore it. It happens because you use the function
+`vkGetBufferMemoryRequirements2KHR()` instead of the standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_ext_memory_priority VK_EXT_memory_priority
+
+VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
+value with Vulkan memory allocations, which the implementation may use to prefer certain
+buffers and images that are critical for performance to stay in device-local memory
+in cases when the memory is over-subscribed, while some others may be moved to the system memory.
+
+VMA offers convenient usage of this extension.
+If you enable it, you can pass a "priority" parameter when creating allocations or custom pools
+and the library automatically passes the value to Vulkan using this extension.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_ext_memory_priority_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
+the `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_ext_memory_priority_usage Usage
+
+When using this extension, you should initialize the following members:
+
+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- VmaPoolCreateInfo::priority when creating a custom pool.
+
+It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
+Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
+so it has a lower chance of being pushed out to system memory and experiencing degraded performance.
+
+It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
+as dedicated allocations and set a high priority on them.
+
+\section vk_ext_memory_priority_usage Usage
+
+When using this extension, you should initialize the following members:
+
+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- VmaPoolCreateInfo::priority when creating a custom pool.
+
+It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
+Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
+and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
+
+It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
+as dedicated and set high priority to them. For example:
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+allocCreateInfo.priority = 1.0f;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+The `priority` member is ignored in the following situations:
+
+- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
+  from the parameters passed in #VmaPoolCreateInfo when the pool was created.
+- Allocations created in default pools: They inherit the priority from the parameters
+  VMA used when creating default pools, which means `priority == 0.5f`.
+
+
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for the
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such a type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach an additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach an additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following the steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags, as shown in the sketch below. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found a memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
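+
+A small sketch of the first approach, assuming an `allocator` created as described above
+and a hypothetical buffer used for breadcrumb markers:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 4096;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+// Require the allocation to end up in an uncached, device-coherent memory type.
+allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode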
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+The device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach an additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
+
+3) (For Vulkan version < 1.2) While creating the device with `vkCreateDevice`, enable this extension - add
+"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach an additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section enabling_buffer_device_address_usage Usage
+
+After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
+The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
+allocated memory blocks wherever it might be needed.
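+
+For instance, here is a rough sketch of creating such a buffer and then fetching its GPU
+address. It assumes Vulkan >= 1.2, where `vkGetBufferDeviceAddress` is a core entry point;
+with the extension alone you would call `vkGetBufferDeviceAddressKHR` instead:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+
+// VMA allocated the backing memory with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT where
+// needed, so the raw GPU address can be fetched and passed to a shader.
+VkBufferDeviceAddressInfo addressInfo = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
+addressInfo.buffer = buf;
+VkDeviceAddress gpuAddress = vkGetBufferDeviceAddress(device, &addressInfo);
+\endcode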
+
+Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
+The second part of this functionality, related to "capture and replay", is not supported,
+as it is intended for use in debugging tools like RenderDoc, not in everyday Vulkan usage.
+
+\section enabling_buffer_device_address_more_information More information
+
+To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+\page general_considerations General considerations
+
+\section general_considerations_thread_safety Thread safety
+
+- The library has no global state, so separate #VmaAllocator objects can be used
+  independently.
+  There should be no need to create multiple such objects though - one per `VkDevice` is enough.
+- By default, all calls to functions that take a #VmaAllocator as the first parameter
+  are safe to call from multiple threads simultaneously because they are
+  synchronized internally when needed.
+  This includes allocation and deallocation from the default memory pool, as well as custom #VmaPool.
+- When the allocator is created with the #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
+  flag, calls to functions that take such a #VmaAllocator object must be
+  synchronized externally.
+- Access to a #VmaAllocation object must be externally synchronized. For example,
+  you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
+  threads at the same time if you pass the same #VmaAllocation object to these
+  functions.
+- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
+
+\section general_considerations_versioning_and_compatibility Versioning and compatibility
+
+The library uses [**Semantic Versioning**](https://semver.org/),
+which means version numbers follow the convention Major.Minor.Patch (e.g. 2.3.0), where:
+
+- An incremented Patch version means a release is backward- and forward-compatible,
+  introducing only some internal improvements, bug fixes, optimizations etc.
+  or changes that are out of scope of the official API described in this documentation.
+- An incremented Minor version means a release is backward-compatible,
+  so existing code that uses the library should continue to work, while some new
+  symbols could have been added: new structures, functions, new values in existing
+  enums and bit flags, new structure members, but not new function parameters.
+- An incremented Major version means a release could break some backward compatibility.
+
+All changes between official releases are documented in the file "CHANGELOG.md".
+
+\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
+Adding new members to existing structures is treated as backward compatible if initializing
+the new members to binary zero results in the old behavior.
+You should always fully initialize all library structures to zeros and not rely on their
+exact binary size.
+
+\section general_considerations_validation_layer_warnings Validation layer warnings
+
+When using this library, you may encounter the following types of warnings issued by
+the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+to just ignore them.
+
+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+  - It happens when the VK_KHR_dedicated_allocation extension is enabled.
+    The `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It may happen when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find a free range of memory in existing blocks.
+-# If that failed, try to create a new block of `VkDeviceMemory` with the preferred block size.
+-# If that failed, try to create such a block with size / 2, size / 4, size / 8.
+-# If that failed, try to allocate a separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If that failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If that failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
+   between CPU and GPU memory and the related synchronization is the responsibility of the user.
+   Defining some "texture" object that would automatically stream its data from a
+   staging copy in CPU memory to GPU memory would rather be a feature of another,
+   higher-level library implemented on top of VMA.
+   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
+-# **Recreation of buffers and images.** Although the library has functions for
+   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
+   recreate these objects yourself after defragmentation. That is because the big
+   structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in the
+   #VmaAllocation object.
+-# **Handling CPU memory allocation failures.** When dynamically creating small C++
+   objects in CPU memory (not Vulkan memory), allocation failures are not checked
+   and handled gracefully, because that would complicate code significantly and
+   is usually not needed in desktop PC applications anyway.
+   Success of an allocation is just checked with an assert.
+-# **Code free of any compiler warnings.** Maintaining the library to compile and
+   work correctly on so many different platforms is hard enough. Being free of
+   any warnings, on any version of any compiler, is simply not feasible.
+   There are many preprocessor macros that make some variables unused, function parameters unreferenced,
+   or conditional expressions constant in some configurations.
+   The code of this library should not be bigger or more complicated just to silence these warnings.
+   It is recommended to disable such warnings instead.
+-# This is a C++ library with a C interface.
**Bindings or ports to any other programming languages** are welcome as external projects but + are not going to be included into this repository. +*/ diff --git a/third_party/volk.c b/third_party/volk.c deleted file mode 100644 index 03b6486..0000000 --- a/third_party/volk.c +++ /dev/null @@ -1,3041 +0,0 @@ -/* This file is part of volk library; see volk.h for version/license details */ -/* clang-format off */ -#include "volk.h" - -#ifdef _WIN32 - typedef const char* LPCSTR; - typedef struct HINSTANCE__* HINSTANCE; - typedef HINSTANCE HMODULE; - #if defined(_MINWINDEF_) - /* minwindef.h defines FARPROC, and attempting to redefine it may conflict with -Wstrict-prototypes */ - #elif defined(_WIN64) - typedef __int64 (__stdcall* FARPROC)(void); - #else - typedef int (__stdcall* FARPROC)(void); - #endif -#else -# include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _WIN32 -__declspec(dllimport) HMODULE __stdcall LoadLibraryA(LPCSTR); -__declspec(dllimport) FARPROC __stdcall GetProcAddress(HMODULE, LPCSTR); -__declspec(dllimport) int __stdcall FreeLibrary(HMODULE); -#endif - -static void* loadedModule = NULL; -static VkInstance loadedInstance = VK_NULL_HANDLE; -static VkDevice loadedDevice = VK_NULL_HANDLE; - -static void volkGenLoadLoader(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadInstance(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, PFN_vkVoidFunction (*load)(void*, const char*)); - -static PFN_vkVoidFunction vkGetInstanceProcAddrStub(void* context, const char* name) -{ - return vkGetInstanceProcAddr((VkInstance)context, name); -} - -static PFN_vkVoidFunction vkGetDeviceProcAddrStub(void* context, const char* name) -{ - return vkGetDeviceProcAddr((VkDevice)context, name); -} - -static PFN_vkVoidFunction nullProcAddrStub(void* context, const char* name) -{ - (void)context; - (void)name; - return NULL; -} - -VkResult volkInitialize(void) -{ -#if defined(_WIN32) - HMODULE module = LoadLibraryA("vulkan-1.dll"); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - - // note: function pointer is cast through void function pointer to silence cast-function-type warning on gcc8 - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)(void(*)(void))GetProcAddress(module, "vkGetInstanceProcAddr"); -#elif defined(__APPLE__) - void* module = dlopen("libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libMoltenVK.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(module, "vkGetInstanceProcAddr"); -#else - void* module = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(module, "vkGetInstanceProcAddr"); -#endif - - loadedModule = module; - volkGenLoadLoader(NULL, vkGetInstanceProcAddrStub); - - return VK_SUCCESS; -} - -void volkInitializeCustom(PFN_vkGetInstanceProcAddr handler) -{ - vkGetInstanceProcAddr = handler; - - loadedModule = NULL; - volkGenLoadLoader(NULL, vkGetInstanceProcAddrStub); -} - -void volkFinalize(void) -{ - if (loadedModule) - { -#if defined(_WIN32) 
- FreeLibrary((HMODULE)loadedModule); -#else - dlclose(loadedModule); -#endif - } - - vkGetInstanceProcAddr = NULL; - volkGenLoadLoader(NULL, nullProcAddrStub); - volkGenLoadInstance(NULL, nullProcAddrStub); - volkGenLoadDevice(NULL, nullProcAddrStub); - - loadedModule = NULL; - loadedInstance = VK_NULL_HANDLE; - loadedDevice = VK_NULL_HANDLE; -} - -uint32_t volkGetInstanceVersion(void) -{ -#if defined(VK_VERSION_1_1) - uint32_t apiVersion = 0; - if (vkEnumerateInstanceVersion && vkEnumerateInstanceVersion(&apiVersion) == VK_SUCCESS) - return apiVersion; -#endif - - if (vkCreateInstance) - return VK_API_VERSION_1_0; - - return 0; -} - -void volkLoadInstance(VkInstance instance) -{ - loadedInstance = instance; - volkGenLoadInstance(instance, vkGetInstanceProcAddrStub); - volkGenLoadDevice(instance, vkGetInstanceProcAddrStub); -} - -void volkLoadInstanceOnly(VkInstance instance) -{ - loadedInstance = instance; - volkGenLoadInstance(instance, vkGetInstanceProcAddrStub); -} - -VkInstance volkGetLoadedInstance(void) -{ - return loadedInstance; -} - -void volkLoadDevice(VkDevice device) -{ - loadedDevice = device; - volkGenLoadDevice(device, vkGetDeviceProcAddrStub); -} - -VkDevice volkGetLoadedDevice(void) -{ - return loadedDevice; -} - -void volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device) -{ - volkGenLoadDeviceTable(table, device, vkGetDeviceProcAddrStub); -} - -static void volkGenLoadLoader(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_LOADER */ -#if defined(VK_VERSION_1_0) - vkCreateInstance = (PFN_vkCreateInstance)load(context, "vkCreateInstance"); - vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)load(context, "vkEnumerateInstanceExtensionProperties"); - vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)load(context, "vkEnumerateInstanceLayerProperties"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)load(context, "vkEnumerateInstanceVersion"); -#endif /* defined(VK_VERSION_1_1) */ - /* VOLK_GENERATE_LOAD_LOADER */ -} - -static void volkGenLoadInstance(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_INSTANCE */ -#if defined(VK_VERSION_1_0) - vkCreateDevice = (PFN_vkCreateDevice)load(context, "vkCreateDevice"); - vkDestroyInstance = (PFN_vkDestroyInstance)load(context, "vkDestroyInstance"); - vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties)load(context, "vkEnumerateDeviceExtensionProperties"); - vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties)load(context, "vkEnumerateDeviceLayerProperties"); - vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices)load(context, "vkEnumeratePhysicalDevices"); - vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)load(context, "vkGetDeviceProcAddr"); - vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures)load(context, "vkGetPhysicalDeviceFeatures"); - vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties)load(context, "vkGetPhysicalDeviceFormatProperties"); - vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties)load(context, "vkGetPhysicalDeviceImageFormatProperties"); - vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)load(context, "vkGetPhysicalDeviceMemoryProperties"); - vkGetPhysicalDeviceProperties = 
(PFN_vkGetPhysicalDeviceProperties)load(context, "vkGetPhysicalDeviceProperties"); - vkGetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties)load(context, "vkGetPhysicalDeviceQueueFamilyProperties"); - vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - vkEnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups)load(context, "vkEnumeratePhysicalDeviceGroups"); - vkGetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties)load(context, "vkGetPhysicalDeviceExternalBufferProperties"); - vkGetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties)load(context, "vkGetPhysicalDeviceExternalFenceProperties"); - vkGetPhysicalDeviceExternalSemaphoreProperties = (PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)load(context, "vkGetPhysicalDeviceExternalSemaphoreProperties"); - vkGetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2)load(context, "vkGetPhysicalDeviceFeatures2"); - vkGetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2)load(context, "vkGetPhysicalDeviceFormatProperties2"); - vkGetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2)load(context, "vkGetPhysicalDeviceImageFormatProperties2"); - vkGetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2)load(context, "vkGetPhysicalDeviceMemoryProperties2"); - vkGetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2)load(context, "vkGetPhysicalDeviceProperties2"); - vkGetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2)load(context, "vkGetPhysicalDeviceQueueFamilyProperties2"); - vkGetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties2"); -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_3) - vkGetPhysicalDeviceToolProperties = (PFN_vkGetPhysicalDeviceToolProperties)load(context, "vkGetPhysicalDeviceToolProperties"); -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_EXT_acquire_drm_display) - vkAcquireDrmDisplayEXT = (PFN_vkAcquireDrmDisplayEXT)load(context, "vkAcquireDrmDisplayEXT"); - vkGetDrmDisplayEXT = (PFN_vkGetDrmDisplayEXT)load(context, "vkGetDrmDisplayEXT"); -#endif /* defined(VK_EXT_acquire_drm_display) */ -#if defined(VK_EXT_acquire_xlib_display) - vkAcquireXlibDisplayEXT = (PFN_vkAcquireXlibDisplayEXT)load(context, "vkAcquireXlibDisplayEXT"); - vkGetRandROutputDisplayEXT = (PFN_vkGetRandROutputDisplayEXT)load(context, "vkGetRandROutputDisplayEXT"); -#endif /* defined(VK_EXT_acquire_xlib_display) */ -#if defined(VK_EXT_calibrated_timestamps) - vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = (PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)load(context, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"); -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_debug_report) - vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)load(context, "vkCreateDebugReportCallbackEXT"); - vkDebugReportMessageEXT = (PFN_vkDebugReportMessageEXT)load(context, "vkDebugReportMessageEXT"); - vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)load(context, "vkDestroyDebugReportCallbackEXT"); -#endif /* defined(VK_EXT_debug_report) */ -#if 
defined(VK_EXT_debug_utils) - vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT)load(context, "vkCmdBeginDebugUtilsLabelEXT"); - vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT)load(context, "vkCmdEndDebugUtilsLabelEXT"); - vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT)load(context, "vkCmdInsertDebugUtilsLabelEXT"); - vkCreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT)load(context, "vkCreateDebugUtilsMessengerEXT"); - vkDestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT)load(context, "vkDestroyDebugUtilsMessengerEXT"); - vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT)load(context, "vkQueueBeginDebugUtilsLabelEXT"); - vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT)load(context, "vkQueueEndDebugUtilsLabelEXT"); - vkQueueInsertDebugUtilsLabelEXT = (PFN_vkQueueInsertDebugUtilsLabelEXT)load(context, "vkQueueInsertDebugUtilsLabelEXT"); - vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT)load(context, "vkSetDebugUtilsObjectNameEXT"); - vkSetDebugUtilsObjectTagEXT = (PFN_vkSetDebugUtilsObjectTagEXT)load(context, "vkSetDebugUtilsObjectTagEXT"); - vkSubmitDebugUtilsMessageEXT = (PFN_vkSubmitDebugUtilsMessageEXT)load(context, "vkSubmitDebugUtilsMessageEXT"); -#endif /* defined(VK_EXT_debug_utils) */ -#if defined(VK_EXT_direct_mode_display) - vkReleaseDisplayEXT = (PFN_vkReleaseDisplayEXT)load(context, "vkReleaseDisplayEXT"); -#endif /* defined(VK_EXT_direct_mode_display) */ -#if defined(VK_EXT_directfb_surface) - vkCreateDirectFBSurfaceEXT = (PFN_vkCreateDirectFBSurfaceEXT)load(context, "vkCreateDirectFBSurfaceEXT"); - vkGetPhysicalDeviceDirectFBPresentationSupportEXT = (PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT)load(context, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"); -#endif /* defined(VK_EXT_directfb_surface) */ -#if defined(VK_EXT_display_surface_counter) - vkGetPhysicalDeviceSurfaceCapabilities2EXT = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)load(context, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"); -#endif /* defined(VK_EXT_display_surface_counter) */ -#if defined(VK_EXT_full_screen_exclusive) - vkGetPhysicalDeviceSurfacePresentModes2EXT = (PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)load(context, "vkGetPhysicalDeviceSurfacePresentModes2EXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_headless_surface) - vkCreateHeadlessSurfaceEXT = (PFN_vkCreateHeadlessSurfaceEXT)load(context, "vkCreateHeadlessSurfaceEXT"); -#endif /* defined(VK_EXT_headless_surface) */ -#if defined(VK_EXT_metal_surface) - vkCreateMetalSurfaceEXT = (PFN_vkCreateMetalSurfaceEXT)load(context, "vkCreateMetalSurfaceEXT"); -#endif /* defined(VK_EXT_metal_surface) */ -#if defined(VK_EXT_sample_locations) - vkGetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)load(context, "vkGetPhysicalDeviceMultisamplePropertiesEXT"); -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_tooling_info) - vkGetPhysicalDeviceToolPropertiesEXT = (PFN_vkGetPhysicalDeviceToolPropertiesEXT)load(context, "vkGetPhysicalDeviceToolPropertiesEXT"); -#endif /* defined(VK_EXT_tooling_info) */ -#if defined(VK_FUCHSIA_imagepipe_surface) - vkCreateImagePipeSurfaceFUCHSIA = (PFN_vkCreateImagePipeSurfaceFUCHSIA)load(context, "vkCreateImagePipeSurfaceFUCHSIA"); -#endif /* defined(VK_FUCHSIA_imagepipe_surface) */ -#if defined(VK_GGP_stream_descriptor_surface) - vkCreateStreamDescriptorSurfaceGGP = 
(PFN_vkCreateStreamDescriptorSurfaceGGP)load(context, "vkCreateStreamDescriptorSurfaceGGP"); -#endif /* defined(VK_GGP_stream_descriptor_surface) */ -#if defined(VK_KHR_android_surface) - vkCreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)load(context, "vkCreateAndroidSurfaceKHR"); -#endif /* defined(VK_KHR_android_surface) */ -#if defined(VK_KHR_cooperative_matrix) - vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)load(context, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR"); -#endif /* defined(VK_KHR_cooperative_matrix) */ -#if defined(VK_KHR_device_group_creation) - vkEnumeratePhysicalDeviceGroupsKHR = (PFN_vkEnumeratePhysicalDeviceGroupsKHR)load(context, "vkEnumeratePhysicalDeviceGroupsKHR"); -#endif /* defined(VK_KHR_device_group_creation) */ -#if defined(VK_KHR_display) - vkCreateDisplayModeKHR = (PFN_vkCreateDisplayModeKHR)load(context, "vkCreateDisplayModeKHR"); - vkCreateDisplayPlaneSurfaceKHR = (PFN_vkCreateDisplayPlaneSurfaceKHR)load(context, "vkCreateDisplayPlaneSurfaceKHR"); - vkGetDisplayModePropertiesKHR = (PFN_vkGetDisplayModePropertiesKHR)load(context, "vkGetDisplayModePropertiesKHR"); - vkGetDisplayPlaneCapabilitiesKHR = (PFN_vkGetDisplayPlaneCapabilitiesKHR)load(context, "vkGetDisplayPlaneCapabilitiesKHR"); - vkGetDisplayPlaneSupportedDisplaysKHR = (PFN_vkGetDisplayPlaneSupportedDisplaysKHR)load(context, "vkGetDisplayPlaneSupportedDisplaysKHR"); - vkGetPhysicalDeviceDisplayPlanePropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)load(context, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"); - vkGetPhysicalDeviceDisplayPropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)load(context, "vkGetPhysicalDeviceDisplayPropertiesKHR"); -#endif /* defined(VK_KHR_display) */ -#if defined(VK_KHR_external_fence_capabilities) - vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)load(context, "vkGetPhysicalDeviceExternalFencePropertiesKHR"); -#endif /* defined(VK_KHR_external_fence_capabilities) */ -#if defined(VK_KHR_external_memory_capabilities) - vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)load(context, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_capabilities) */ -#if defined(VK_KHR_external_semaphore_capabilities) - vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)load(context, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); -#endif /* defined(VK_KHR_external_semaphore_capabilities) */ -#if defined(VK_KHR_fragment_shading_rate) - vkGetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)load(context, "vkGetPhysicalDeviceFragmentShadingRatesKHR"); -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_display_properties2) - vkGetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR)load(context, "vkGetDisplayModeProperties2KHR"); - vkGetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR)load(context, "vkGetDisplayPlaneCapabilities2KHR"); - vkGetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)load(context, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"); - vkGetPhysicalDeviceDisplayProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayProperties2KHR)load(context, "vkGetPhysicalDeviceDisplayProperties2KHR"); -#endif /* 
defined(VK_KHR_get_display_properties2) */ -#if defined(VK_KHR_get_physical_device_properties2) - vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)load(context, "vkGetPhysicalDeviceFeatures2KHR"); - vkGetPhysicalDeviceFormatProperties2KHR = (PFN_vkGetPhysicalDeviceFormatProperties2KHR)load(context, "vkGetPhysicalDeviceFormatProperties2KHR"); - vkGetPhysicalDeviceImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)load(context, "vkGetPhysicalDeviceImageFormatProperties2KHR"); - vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)load(context, "vkGetPhysicalDeviceMemoryProperties2KHR"); - vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)load(context, "vkGetPhysicalDeviceProperties2KHR"); - vkGetPhysicalDeviceQueueFamilyProperties2KHR = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)load(context, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"); - vkGetPhysicalDeviceSparseImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"); -#endif /* defined(VK_KHR_get_physical_device_properties2) */ -#if defined(VK_KHR_get_surface_capabilities2) - vkGetPhysicalDeviceSurfaceCapabilities2KHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)load(context, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"); - vkGetPhysicalDeviceSurfaceFormats2KHR = (PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)load(context, "vkGetPhysicalDeviceSurfaceFormats2KHR"); -#endif /* defined(VK_KHR_get_surface_capabilities2) */ -#if defined(VK_KHR_performance_query) - vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = (PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)load(context, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"); - vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = (PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)load(context, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"); -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_surface) - vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)load(context, "vkDestroySurfaceKHR"); - vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)load(context, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); - vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)load(context, "vkGetPhysicalDeviceSurfaceFormatsKHR"); - vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)load(context, "vkGetPhysicalDeviceSurfacePresentModesKHR"); - vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)load(context, "vkGetPhysicalDeviceSurfaceSupportKHR"); -#endif /* defined(VK_KHR_surface) */ -#if defined(VK_KHR_video_encode_queue) - vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR = (PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR)load(context, "vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR"); -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - vkGetPhysicalDeviceVideoCapabilitiesKHR = (PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR)load(context, "vkGetPhysicalDeviceVideoCapabilitiesKHR"); - vkGetPhysicalDeviceVideoFormatPropertiesKHR = (PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR)load(context, "vkGetPhysicalDeviceVideoFormatPropertiesKHR"); -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_KHR_wayland_surface) - 
vkCreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)load(context, "vkCreateWaylandSurfaceKHR"); - vkGetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)load(context, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"); -#endif /* defined(VK_KHR_wayland_surface) */ -#if defined(VK_KHR_win32_surface) - vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)load(context, "vkCreateWin32SurfaceKHR"); - vkGetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)load(context, "vkGetPhysicalDeviceWin32PresentationSupportKHR"); -#endif /* defined(VK_KHR_win32_surface) */ -#if defined(VK_KHR_xcb_surface) - vkCreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)load(context, "vkCreateXcbSurfaceKHR"); - vkGetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)load(context, "vkGetPhysicalDeviceXcbPresentationSupportKHR"); -#endif /* defined(VK_KHR_xcb_surface) */ -#if defined(VK_KHR_xlib_surface) - vkCreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)load(context, "vkCreateXlibSurfaceKHR"); - vkGetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)load(context, "vkGetPhysicalDeviceXlibPresentationSupportKHR"); -#endif /* defined(VK_KHR_xlib_surface) */ -#if defined(VK_MVK_ios_surface) - vkCreateIOSSurfaceMVK = (PFN_vkCreateIOSSurfaceMVK)load(context, "vkCreateIOSSurfaceMVK"); -#endif /* defined(VK_MVK_ios_surface) */ -#if defined(VK_MVK_macos_surface) - vkCreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK)load(context, "vkCreateMacOSSurfaceMVK"); -#endif /* defined(VK_MVK_macos_surface) */ -#if defined(VK_NN_vi_surface) - vkCreateViSurfaceNN = (PFN_vkCreateViSurfaceNN)load(context, "vkCreateViSurfaceNN"); -#endif /* defined(VK_NN_vi_surface) */ -#if defined(VK_NV_acquire_winrt_display) - vkAcquireWinrtDisplayNV = (PFN_vkAcquireWinrtDisplayNV)load(context, "vkAcquireWinrtDisplayNV"); - vkGetWinrtDisplayNV = (PFN_vkGetWinrtDisplayNV)load(context, "vkGetWinrtDisplayNV"); -#endif /* defined(VK_NV_acquire_winrt_display) */ -#if defined(VK_NV_cooperative_matrix) - vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)load(context, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"); -#endif /* defined(VK_NV_cooperative_matrix) */ -#if defined(VK_NV_coverage_reduction_mode) - vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = (PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)load(context, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"); -#endif /* defined(VK_NV_coverage_reduction_mode) */ -#if defined(VK_NV_external_memory_capabilities) - vkGetPhysicalDeviceExternalImageFormatPropertiesNV = (PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)load(context, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"); -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_optical_flow) - vkGetPhysicalDeviceOpticalFlowImageFormatsNV = (PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV)load(context, "vkGetPhysicalDeviceOpticalFlowImageFormatsNV"); -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_QNX_screen_surface) - vkCreateScreenSurfaceQNX = (PFN_vkCreateScreenSurfaceQNX)load(context, "vkCreateScreenSurfaceQNX"); - vkGetPhysicalDeviceScreenPresentationSupportQNX = (PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX)load(context, "vkGetPhysicalDeviceScreenPresentationSupportQNX"); -#endif /* 
defined(VK_QNX_screen_surface) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - vkGetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR)load(context, "vkGetPhysicalDevicePresentRectanglesKHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ - /* VOLK_GENERATE_LOAD_INSTANCE */ -} - -static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_DEVICE */ -#if defined(VK_VERSION_1_0) - vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)load(context, "vkAllocateCommandBuffers"); - vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)load(context, "vkAllocateDescriptorSets"); - vkAllocateMemory = (PFN_vkAllocateMemory)load(context, "vkAllocateMemory"); - vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)load(context, "vkBeginCommandBuffer"); - vkBindBufferMemory = (PFN_vkBindBufferMemory)load(context, "vkBindBufferMemory"); - vkBindImageMemory = (PFN_vkBindImageMemory)load(context, "vkBindImageMemory"); - vkCmdBeginQuery = (PFN_vkCmdBeginQuery)load(context, "vkCmdBeginQuery"); - vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)load(context, "vkCmdBeginRenderPass"); - vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)load(context, "vkCmdBindDescriptorSets"); - vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)load(context, "vkCmdBindIndexBuffer"); - vkCmdBindPipeline = (PFN_vkCmdBindPipeline)load(context, "vkCmdBindPipeline"); - vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)load(context, "vkCmdBindVertexBuffers"); - vkCmdBlitImage = (PFN_vkCmdBlitImage)load(context, "vkCmdBlitImage"); - vkCmdClearAttachments = (PFN_vkCmdClearAttachments)load(context, "vkCmdClearAttachments"); - vkCmdClearColorImage = (PFN_vkCmdClearColorImage)load(context, "vkCmdClearColorImage"); - vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)load(context, "vkCmdClearDepthStencilImage"); - vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)load(context, "vkCmdCopyBuffer"); - vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)load(context, "vkCmdCopyBufferToImage"); - vkCmdCopyImage = (PFN_vkCmdCopyImage)load(context, "vkCmdCopyImage"); - vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)load(context, "vkCmdCopyImageToBuffer"); - vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)load(context, "vkCmdCopyQueryPoolResults"); - vkCmdDispatch = (PFN_vkCmdDispatch)load(context, "vkCmdDispatch"); - vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)load(context, "vkCmdDispatchIndirect"); - vkCmdDraw = (PFN_vkCmdDraw)load(context, "vkCmdDraw"); - vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)load(context, "vkCmdDrawIndexed"); - vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)load(context, "vkCmdDrawIndexedIndirect"); - vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)load(context, "vkCmdDrawIndirect"); - vkCmdEndQuery = (PFN_vkCmdEndQuery)load(context, "vkCmdEndQuery"); - vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)load(context, "vkCmdEndRenderPass"); - vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)load(context, "vkCmdExecuteCommands"); - vkCmdFillBuffer = (PFN_vkCmdFillBuffer)load(context, "vkCmdFillBuffer"); - vkCmdNextSubpass = (PFN_vkCmdNextSubpass)load(context, "vkCmdNextSubpass"); - vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)load(context, "vkCmdPipelineBarrier"); - vkCmdPushConstants = (PFN_vkCmdPushConstants)load(context, 
"vkCmdPushConstants"); - vkCmdResetEvent = (PFN_vkCmdResetEvent)load(context, "vkCmdResetEvent"); - vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)load(context, "vkCmdResetQueryPool"); - vkCmdResolveImage = (PFN_vkCmdResolveImage)load(context, "vkCmdResolveImage"); - vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)load(context, "vkCmdSetBlendConstants"); - vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)load(context, "vkCmdSetDepthBias"); - vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)load(context, "vkCmdSetDepthBounds"); - vkCmdSetEvent = (PFN_vkCmdSetEvent)load(context, "vkCmdSetEvent"); - vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)load(context, "vkCmdSetLineWidth"); - vkCmdSetScissor = (PFN_vkCmdSetScissor)load(context, "vkCmdSetScissor"); - vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)load(context, "vkCmdSetStencilCompareMask"); - vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)load(context, "vkCmdSetStencilReference"); - vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)load(context, "vkCmdSetStencilWriteMask"); - vkCmdSetViewport = (PFN_vkCmdSetViewport)load(context, "vkCmdSetViewport"); - vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)load(context, "vkCmdUpdateBuffer"); - vkCmdWaitEvents = (PFN_vkCmdWaitEvents)load(context, "vkCmdWaitEvents"); - vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)load(context, "vkCmdWriteTimestamp"); - vkCreateBuffer = (PFN_vkCreateBuffer)load(context, "vkCreateBuffer"); - vkCreateBufferView = (PFN_vkCreateBufferView)load(context, "vkCreateBufferView"); - vkCreateCommandPool = (PFN_vkCreateCommandPool)load(context, "vkCreateCommandPool"); - vkCreateComputePipelines = (PFN_vkCreateComputePipelines)load(context, "vkCreateComputePipelines"); - vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)load(context, "vkCreateDescriptorPool"); - vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)load(context, "vkCreateDescriptorSetLayout"); - vkCreateEvent = (PFN_vkCreateEvent)load(context, "vkCreateEvent"); - vkCreateFence = (PFN_vkCreateFence)load(context, "vkCreateFence"); - vkCreateFramebuffer = (PFN_vkCreateFramebuffer)load(context, "vkCreateFramebuffer"); - vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)load(context, "vkCreateGraphicsPipelines"); - vkCreateImage = (PFN_vkCreateImage)load(context, "vkCreateImage"); - vkCreateImageView = (PFN_vkCreateImageView)load(context, "vkCreateImageView"); - vkCreatePipelineCache = (PFN_vkCreatePipelineCache)load(context, "vkCreatePipelineCache"); - vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)load(context, "vkCreatePipelineLayout"); - vkCreateQueryPool = (PFN_vkCreateQueryPool)load(context, "vkCreateQueryPool"); - vkCreateRenderPass = (PFN_vkCreateRenderPass)load(context, "vkCreateRenderPass"); - vkCreateSampler = (PFN_vkCreateSampler)load(context, "vkCreateSampler"); - vkCreateSemaphore = (PFN_vkCreateSemaphore)load(context, "vkCreateSemaphore"); - vkCreateShaderModule = (PFN_vkCreateShaderModule)load(context, "vkCreateShaderModule"); - vkDestroyBuffer = (PFN_vkDestroyBuffer)load(context, "vkDestroyBuffer"); - vkDestroyBufferView = (PFN_vkDestroyBufferView)load(context, "vkDestroyBufferView"); - vkDestroyCommandPool = (PFN_vkDestroyCommandPool)load(context, "vkDestroyCommandPool"); - vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)load(context, "vkDestroyDescriptorPool"); - vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)load(context, "vkDestroyDescriptorSetLayout"); - vkDestroyDevice = (PFN_vkDestroyDevice)load(context, 
"vkDestroyDevice"); - vkDestroyEvent = (PFN_vkDestroyEvent)load(context, "vkDestroyEvent"); - vkDestroyFence = (PFN_vkDestroyFence)load(context, "vkDestroyFence"); - vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)load(context, "vkDestroyFramebuffer"); - vkDestroyImage = (PFN_vkDestroyImage)load(context, "vkDestroyImage"); - vkDestroyImageView = (PFN_vkDestroyImageView)load(context, "vkDestroyImageView"); - vkDestroyPipeline = (PFN_vkDestroyPipeline)load(context, "vkDestroyPipeline"); - vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)load(context, "vkDestroyPipelineCache"); - vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)load(context, "vkDestroyPipelineLayout"); - vkDestroyQueryPool = (PFN_vkDestroyQueryPool)load(context, "vkDestroyQueryPool"); - vkDestroyRenderPass = (PFN_vkDestroyRenderPass)load(context, "vkDestroyRenderPass"); - vkDestroySampler = (PFN_vkDestroySampler)load(context, "vkDestroySampler"); - vkDestroySemaphore = (PFN_vkDestroySemaphore)load(context, "vkDestroySemaphore"); - vkDestroyShaderModule = (PFN_vkDestroyShaderModule)load(context, "vkDestroyShaderModule"); - vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)load(context, "vkDeviceWaitIdle"); - vkEndCommandBuffer = (PFN_vkEndCommandBuffer)load(context, "vkEndCommandBuffer"); - vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)load(context, "vkFlushMappedMemoryRanges"); - vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)load(context, "vkFreeCommandBuffers"); - vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)load(context, "vkFreeDescriptorSets"); - vkFreeMemory = (PFN_vkFreeMemory)load(context, "vkFreeMemory"); - vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)load(context, "vkGetBufferMemoryRequirements"); - vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)load(context, "vkGetDeviceMemoryCommitment"); - vkGetDeviceQueue = (PFN_vkGetDeviceQueue)load(context, "vkGetDeviceQueue"); - vkGetEventStatus = (PFN_vkGetEventStatus)load(context, "vkGetEventStatus"); - vkGetFenceStatus = (PFN_vkGetFenceStatus)load(context, "vkGetFenceStatus"); - vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)load(context, "vkGetImageMemoryRequirements"); - vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)load(context, "vkGetImageSparseMemoryRequirements"); - vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)load(context, "vkGetImageSubresourceLayout"); - vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)load(context, "vkGetPipelineCacheData"); - vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults)load(context, "vkGetQueryPoolResults"); - vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)load(context, "vkGetRenderAreaGranularity"); - vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)load(context, "vkInvalidateMappedMemoryRanges"); - vkMapMemory = (PFN_vkMapMemory)load(context, "vkMapMemory"); - vkMergePipelineCaches = (PFN_vkMergePipelineCaches)load(context, "vkMergePipelineCaches"); - vkQueueBindSparse = (PFN_vkQueueBindSparse)load(context, "vkQueueBindSparse"); - vkQueueSubmit = (PFN_vkQueueSubmit)load(context, "vkQueueSubmit"); - vkQueueWaitIdle = (PFN_vkQueueWaitIdle)load(context, "vkQueueWaitIdle"); - vkResetCommandBuffer = (PFN_vkResetCommandBuffer)load(context, "vkResetCommandBuffer"); - vkResetCommandPool = (PFN_vkResetCommandPool)load(context, "vkResetCommandPool"); - vkResetDescriptorPool = (PFN_vkResetDescriptorPool)load(context, "vkResetDescriptorPool"); - vkResetEvent = 
(PFN_vkResetEvent)load(context, "vkResetEvent"); - vkResetFences = (PFN_vkResetFences)load(context, "vkResetFences"); - vkSetEvent = (PFN_vkSetEvent)load(context, "vkSetEvent"); - vkUnmapMemory = (PFN_vkUnmapMemory)load(context, "vkUnmapMemory"); - vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)load(context, "vkUpdateDescriptorSets"); - vkWaitForFences = (PFN_vkWaitForFences)load(context, "vkWaitForFences"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - vkBindBufferMemory2 = (PFN_vkBindBufferMemory2)load(context, "vkBindBufferMemory2"); - vkBindImageMemory2 = (PFN_vkBindImageMemory2)load(context, "vkBindImageMemory2"); - vkCmdDispatchBase = (PFN_vkCmdDispatchBase)load(context, "vkCmdDispatchBase"); - vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask)load(context, "vkCmdSetDeviceMask"); - vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate)load(context, "vkCreateDescriptorUpdateTemplate"); - vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion)load(context, "vkCreateSamplerYcbcrConversion"); - vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate)load(context, "vkDestroyDescriptorUpdateTemplate"); - vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion)load(context, "vkDestroySamplerYcbcrConversion"); - vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2)load(context, "vkGetBufferMemoryRequirements2"); - vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport)load(context, "vkGetDescriptorSetLayoutSupport"); - vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures)load(context, "vkGetDeviceGroupPeerMemoryFeatures"); - vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2)load(context, "vkGetDeviceQueue2"); - vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2)load(context, "vkGetImageMemoryRequirements2"); - vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2)load(context, "vkGetImageSparseMemoryRequirements2"); - vkTrimCommandPool = (PFN_vkTrimCommandPool)load(context, "vkTrimCommandPool"); - vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate)load(context, "vkUpdateDescriptorSetWithTemplate"); -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) - vkCmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)load(context, "vkCmdBeginRenderPass2"); - vkCmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)load(context, "vkCmdDrawIndexedIndirectCount"); - vkCmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)load(context, "vkCmdDrawIndirectCount"); - vkCmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2)load(context, "vkCmdEndRenderPass2"); - vkCmdNextSubpass2 = (PFN_vkCmdNextSubpass2)load(context, "vkCmdNextSubpass2"); - vkCreateRenderPass2 = (PFN_vkCreateRenderPass2)load(context, "vkCreateRenderPass2"); - vkGetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)load(context, "vkGetBufferDeviceAddress"); - vkGetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress)load(context, "vkGetBufferOpaqueCaptureAddress"); - vkGetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)load(context, "vkGetDeviceMemoryOpaqueCaptureAddress"); - vkGetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)load(context, "vkGetSemaphoreCounterValue"); - vkResetQueryPool = (PFN_vkResetQueryPool)load(context, "vkResetQueryPool"); - vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore"); - vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, 
"vkWaitSemaphores"); -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) - vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering"); - vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2"); - vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2"); - vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2"); - vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2"); - vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2"); - vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2"); - vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering"); - vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2"); - vkCmdResetEvent2 = (PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2"); - vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2"); - vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode"); - vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable"); - vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable"); - vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp"); - vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable"); - vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable"); - vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2"); - vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace"); - vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable"); - vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology"); - vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable"); - vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount"); - vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp"); - vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable"); - vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount"); - vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2"); - vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2"); - vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot"); - vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot"); - vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements"); - vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements"); - vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, "vkGetDeviceImageSparseMemoryRequirements"); - vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData"); - vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2"); - vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData"); -#endif /* 
defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) - vkCmdDispatchGraphAMDX = (PFN_vkCmdDispatchGraphAMDX)load(context, "vkCmdDispatchGraphAMDX"); - vkCmdDispatchGraphIndirectAMDX = (PFN_vkCmdDispatchGraphIndirectAMDX)load(context, "vkCmdDispatchGraphIndirectAMDX"); - vkCmdDispatchGraphIndirectCountAMDX = (PFN_vkCmdDispatchGraphIndirectCountAMDX)load(context, "vkCmdDispatchGraphIndirectCountAMDX"); - vkCmdInitializeGraphScratchMemoryAMDX = (PFN_vkCmdInitializeGraphScratchMemoryAMDX)load(context, "vkCmdInitializeGraphScratchMemoryAMDX"); - vkCreateExecutionGraphPipelinesAMDX = (PFN_vkCreateExecutionGraphPipelinesAMDX)load(context, "vkCreateExecutionGraphPipelinesAMDX"); - vkGetExecutionGraphPipelineNodeIndexAMDX = (PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)load(context, "vkGetExecutionGraphPipelineNodeIndexAMDX"); - vkGetExecutionGraphPipelineScratchSizeAMDX = (PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)load(context, "vkGetExecutionGraphPipelineScratchSizeAMDX"); -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_buffer_marker) - vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD"); -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) - vkSetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)load(context, "vkSetLocalDimmingAMD"); -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) - vkCmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)load(context, "vkCmdDrawIndexedIndirectCountAMD"); - vkCmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)load(context, "vkCmdDrawIndirectCountAMD"); -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) - vkGetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)load(context, "vkGetShaderInfoAMD"); -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) - vkGetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)load(context, "vkGetAndroidHardwareBufferPropertiesANDROID"); - vkGetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)load(context, "vkGetMemoryAndroidHardwareBufferANDROID"); -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) - vkCmdSetAttachmentFeedbackLoopEnableEXT = (PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)load(context, "vkCmdSetAttachmentFeedbackLoopEnableEXT"); -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) - vkGetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)load(context, "vkGetBufferDeviceAddressEXT"); -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) - vkGetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)load(context, "vkGetCalibratedTimestampsEXT"); -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) - vkCmdSetColorWriteEnableEXT = (PFN_vkCmdSetColorWriteEnableEXT)load(context, "vkCmdSetColorWriteEnableEXT"); -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) - vkCmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)load(context, "vkCmdBeginConditionalRenderingEXT"); - vkCmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)load(context, "vkCmdEndConditionalRenderingEXT"); -#endif /* 
defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) - vkCmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)load(context, "vkCmdDebugMarkerBeginEXT"); - vkCmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)load(context, "vkCmdDebugMarkerEndEXT"); - vkCmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)load(context, "vkCmdDebugMarkerInsertEXT"); - vkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)load(context, "vkDebugMarkerSetObjectNameEXT"); - vkDebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)load(context, "vkDebugMarkerSetObjectTagEXT"); -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_depth_bias_control) - vkCmdSetDepthBias2EXT = (PFN_vkCmdSetDepthBias2EXT)load(context, "vkCmdSetDepthBias2EXT"); -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) - vkCmdBindDescriptorBufferEmbeddedSamplersEXT = (PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplersEXT"); - vkCmdBindDescriptorBuffersEXT = (PFN_vkCmdBindDescriptorBuffersEXT)load(context, "vkCmdBindDescriptorBuffersEXT"); - vkCmdSetDescriptorBufferOffsetsEXT = (PFN_vkCmdSetDescriptorBufferOffsetsEXT)load(context, "vkCmdSetDescriptorBufferOffsetsEXT"); - vkGetBufferOpaqueCaptureDescriptorDataEXT = (PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT)load(context, "vkGetBufferOpaqueCaptureDescriptorDataEXT"); - vkGetDescriptorEXT = (PFN_vkGetDescriptorEXT)load(context, "vkGetDescriptorEXT"); - vkGetDescriptorSetLayoutBindingOffsetEXT = (PFN_vkGetDescriptorSetLayoutBindingOffsetEXT)load(context, "vkGetDescriptorSetLayoutBindingOffsetEXT"); - vkGetDescriptorSetLayoutSizeEXT = (PFN_vkGetDescriptorSetLayoutSizeEXT)load(context, "vkGetDescriptorSetLayoutSizeEXT"); - vkGetImageOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageOpaqueCaptureDescriptorDataEXT"); - vkGetImageViewOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageViewOpaqueCaptureDescriptorDataEXT"); - vkGetSamplerOpaqueCaptureDescriptorDataEXT = (PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT)load(context, "vkGetSamplerOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) - vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = (PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT)load(context, "vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) - vkGetDeviceFaultInfoEXT = (PFN_vkGetDeviceFaultInfoEXT)load(context, "vkGetDeviceFaultInfoEXT"); -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_discard_rectangles) - vkCmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)load(context, "vkCmdSetDiscardRectangleEXT"); -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 - vkCmdSetDiscardRectangleEnableEXT = (PFN_vkCmdSetDiscardRectangleEnableEXT)load(context, "vkCmdSetDiscardRectangleEnableEXT"); - vkCmdSetDiscardRectangleModeEXT = (PFN_vkCmdSetDiscardRectangleModeEXT)load(context, "vkCmdSetDiscardRectangleModeEXT"); -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ 
-#if defined(VK_EXT_display_control) - vkDisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)load(context, "vkDisplayPowerControlEXT"); - vkGetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)load(context, "vkGetSwapchainCounterEXT"); - vkRegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)load(context, "vkRegisterDeviceEventEXT"); - vkRegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)load(context, "vkRegisterDisplayEventEXT"); -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_external_memory_host) - vkGetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)load(context, "vkGetMemoryHostPointerPropertiesEXT"); -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) - vkAcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)load(context, "vkAcquireFullScreenExclusiveModeEXT"); - vkReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)load(context, "vkReleaseFullScreenExclusiveModeEXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_hdr_metadata) - vkSetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)load(context, "vkSetHdrMetadataEXT"); -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_host_image_copy) - vkCopyImageToImageEXT = (PFN_vkCopyImageToImageEXT)load(context, "vkCopyImageToImageEXT"); - vkCopyImageToMemoryEXT = (PFN_vkCopyImageToMemoryEXT)load(context, "vkCopyImageToMemoryEXT"); - vkCopyMemoryToImageEXT = (PFN_vkCopyMemoryToImageEXT)load(context, "vkCopyMemoryToImageEXT"); - vkTransitionImageLayoutEXT = (PFN_vkTransitionImageLayoutEXT)load(context, "vkTransitionImageLayoutEXT"); -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) - vkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)load(context, "vkResetQueryPoolEXT"); -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) - vkGetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)load(context, "vkGetImageDrmFormatModifierPropertiesEXT"); -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) - vkCmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)load(context, "vkCmdSetLineStippleEXT"); -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) - vkCmdDrawMeshTasksEXT = (PFN_vkCmdDrawMeshTasksEXT)load(context, "vkCmdDrawMeshTasksEXT"); - vkCmdDrawMeshTasksIndirectCountEXT = (PFN_vkCmdDrawMeshTasksIndirectCountEXT)load(context, "vkCmdDrawMeshTasksIndirectCountEXT"); - vkCmdDrawMeshTasksIndirectEXT = (PFN_vkCmdDrawMeshTasksIndirectEXT)load(context, "vkCmdDrawMeshTasksIndirectEXT"); -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) - vkExportMetalObjectsEXT = (PFN_vkExportMetalObjectsEXT)load(context, "vkExportMetalObjectsEXT"); -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_multi_draw) - vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT"); - vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT"); -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) - vkBuildMicromapsEXT = (PFN_vkBuildMicromapsEXT)load(context, "vkBuildMicromapsEXT"); - vkCmdBuildMicromapsEXT = (PFN_vkCmdBuildMicromapsEXT)load(context, "vkCmdBuildMicromapsEXT"); - vkCmdCopyMemoryToMicromapEXT = (PFN_vkCmdCopyMemoryToMicromapEXT)load(context, "vkCmdCopyMemoryToMicromapEXT"); - vkCmdCopyMicromapEXT = 
(PFN_vkCmdCopyMicromapEXT)load(context, "vkCmdCopyMicromapEXT"); - vkCmdCopyMicromapToMemoryEXT = (PFN_vkCmdCopyMicromapToMemoryEXT)load(context, "vkCmdCopyMicromapToMemoryEXT"); - vkCmdWriteMicromapsPropertiesEXT = (PFN_vkCmdWriteMicromapsPropertiesEXT)load(context, "vkCmdWriteMicromapsPropertiesEXT"); - vkCopyMemoryToMicromapEXT = (PFN_vkCopyMemoryToMicromapEXT)load(context, "vkCopyMemoryToMicromapEXT"); - vkCopyMicromapEXT = (PFN_vkCopyMicromapEXT)load(context, "vkCopyMicromapEXT"); - vkCopyMicromapToMemoryEXT = (PFN_vkCopyMicromapToMemoryEXT)load(context, "vkCopyMicromapToMemoryEXT"); - vkCreateMicromapEXT = (PFN_vkCreateMicromapEXT)load(context, "vkCreateMicromapEXT"); - vkDestroyMicromapEXT = (PFN_vkDestroyMicromapEXT)load(context, "vkDestroyMicromapEXT"); - vkGetDeviceMicromapCompatibilityEXT = (PFN_vkGetDeviceMicromapCompatibilityEXT)load(context, "vkGetDeviceMicromapCompatibilityEXT"); - vkGetMicromapBuildSizesEXT = (PFN_vkGetMicromapBuildSizesEXT)load(context, "vkGetMicromapBuildSizesEXT"); - vkWriteMicromapsPropertiesEXT = (PFN_vkWriteMicromapsPropertiesEXT)load(context, "vkWriteMicromapsPropertiesEXT"); -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) - vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT"); -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) - vkGetPipelinePropertiesEXT = (PFN_vkGetPipelinePropertiesEXT)load(context, "vkGetPipelinePropertiesEXT"); -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) - vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT"); - vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT"); - vkGetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)load(context, "vkGetPrivateDataEXT"); - vkSetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)load(context, "vkSetPrivateDataEXT"); -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) - vkCmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)load(context, "vkCmdSetSampleLocationsEXT"); -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) - vkGetShaderModuleCreateInfoIdentifierEXT = (PFN_vkGetShaderModuleCreateInfoIdentifierEXT)load(context, "vkGetShaderModuleCreateInfoIdentifierEXT"); - vkGetShaderModuleIdentifierEXT = (PFN_vkGetShaderModuleIdentifierEXT)load(context, "vkGetShaderModuleIdentifierEXT"); -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) - vkCmdBindShadersEXT = (PFN_vkCmdBindShadersEXT)load(context, "vkCmdBindShadersEXT"); - vkCreateShadersEXT = (PFN_vkCreateShadersEXT)load(context, "vkCreateShadersEXT"); - vkDestroyShaderEXT = (PFN_vkDestroyShaderEXT)load(context, "vkDestroyShaderEXT"); - vkGetShaderBinaryDataEXT = (PFN_vkGetShaderBinaryDataEXT)load(context, "vkGetShaderBinaryDataEXT"); -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) - vkReleaseSwapchainImagesEXT = (PFN_vkReleaseSwapchainImagesEXT)load(context, "vkReleaseSwapchainImagesEXT"); -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_transform_feedback) - vkCmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)load(context, "vkCmdBeginQueryIndexedEXT"); - vkCmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)load(context, "vkCmdBeginTransformFeedbackEXT"); - 
vkCmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)load(context, "vkCmdBindTransformFeedbackBuffersEXT"); - vkCmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)load(context, "vkCmdDrawIndirectByteCountEXT"); - vkCmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)load(context, "vkCmdEndQueryIndexedEXT"); - vkCmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)load(context, "vkCmdEndTransformFeedbackEXT"); -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) - vkCreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)load(context, "vkCreateValidationCacheEXT"); - vkDestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)load(context, "vkDestroyValidationCacheEXT"); - vkGetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)load(context, "vkGetValidationCacheDataEXT"); - vkMergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)load(context, "vkMergeValidationCachesEXT"); -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) - vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA"); - vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA"); - vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA"); - vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA"); - vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, "vkSetBufferCollectionImageConstraintsFUCHSIA"); -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) - vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, "vkGetMemoryZirconHandleFUCHSIA"); - vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) - vkGetSemaphoreZirconHandleFUCHSIA = (PFN_vkGetSemaphoreZirconHandleFUCHSIA)load(context, "vkGetSemaphoreZirconHandleFUCHSIA"); - vkImportSemaphoreZirconHandleFUCHSIA = (PFN_vkImportSemaphoreZirconHandleFUCHSIA)load(context, "vkImportSemaphoreZirconHandleFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_GOOGLE_display_timing) - vkGetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)load(context, "vkGetPastPresentationTimingGOOGLE"); - vkGetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)load(context, "vkGetRefreshCycleDurationGOOGLE"); -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) - vkCmdDrawClusterHUAWEI = (PFN_vkCmdDrawClusterHUAWEI)load(context, "vkCmdDrawClusterHUAWEI"); - vkCmdDrawClusterIndirectHUAWEI = (PFN_vkCmdDrawClusterIndirectHUAWEI)load(context, "vkCmdDrawClusterIndirectHUAWEI"); -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) - vkCmdBindInvocationMaskHUAWEI = (PFN_vkCmdBindInvocationMaskHUAWEI)load(context, "vkCmdBindInvocationMaskHUAWEI"); -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) - vkCmdSubpassShadingHUAWEI = 
(PFN_vkCmdSubpassShadingHUAWEI)load(context, "vkCmdSubpassShadingHUAWEI"); - vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = (PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)load(context, "vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI"); -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) - vkAcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)load(context, "vkAcquirePerformanceConfigurationINTEL"); - vkCmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)load(context, "vkCmdSetPerformanceMarkerINTEL"); - vkCmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)load(context, "vkCmdSetPerformanceOverrideINTEL"); - vkCmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)load(context, "vkCmdSetPerformanceStreamMarkerINTEL"); - vkGetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)load(context, "vkGetPerformanceParameterINTEL"); - vkInitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)load(context, "vkInitializePerformanceApiINTEL"); - vkQueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)load(context, "vkQueueSetPerformanceConfigurationINTEL"); - vkReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)load(context, "vkReleasePerformanceConfigurationINTEL"); - vkUninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)load(context, "vkUninitializePerformanceApiINTEL"); -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) - vkBuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)load(context, "vkBuildAccelerationStructuresKHR"); - vkCmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)load(context, "vkCmdBuildAccelerationStructuresIndirectKHR"); - vkCmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)load(context, "vkCmdBuildAccelerationStructuresKHR"); - vkCmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)load(context, "vkCmdCopyAccelerationStructureKHR"); - vkCmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)load(context, "vkCmdCopyAccelerationStructureToMemoryKHR"); - vkCmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)load(context, "vkCmdCopyMemoryToAccelerationStructureKHR"); - vkCmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)load(context, "vkCmdWriteAccelerationStructuresPropertiesKHR"); - vkCopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)load(context, "vkCopyAccelerationStructureKHR"); - vkCopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)load(context, "vkCopyAccelerationStructureToMemoryKHR"); - vkCopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)load(context, "vkCopyMemoryToAccelerationStructureKHR"); - vkCreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)load(context, "vkCreateAccelerationStructureKHR"); - vkDestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)load(context, "vkDestroyAccelerationStructureKHR"); - vkGetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)load(context, "vkGetAccelerationStructureBuildSizesKHR"); - vkGetAccelerationStructureDeviceAddressKHR = 
(PFN_vkGetAccelerationStructureDeviceAddressKHR)load(context, "vkGetAccelerationStructureDeviceAddressKHR"); - vkGetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)load(context, "vkGetDeviceAccelerationStructureCompatibilityKHR"); - vkWriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)load(context, "vkWriteAccelerationStructuresPropertiesKHR"); -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_bind_memory2) - vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)load(context, "vkBindBufferMemory2KHR"); - vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)load(context, "vkBindImageMemory2KHR"); -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) - vkGetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)load(context, "vkGetBufferDeviceAddressKHR"); - vkGetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)load(context, "vkGetBufferOpaqueCaptureAddressKHR"); - vkGetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)load(context, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_copy_commands2) - vkCmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)load(context, "vkCmdBlitImage2KHR"); - vkCmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)load(context, "vkCmdCopyBuffer2KHR"); - vkCmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)load(context, "vkCmdCopyBufferToImage2KHR"); - vkCmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)load(context, "vkCmdCopyImage2KHR"); - vkCmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)load(context, "vkCmdCopyImageToBuffer2KHR"); - vkCmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)load(context, "vkCmdResolveImage2KHR"); -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) - vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)load(context, "vkCmdBeginRenderPass2KHR"); - vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)load(context, "vkCmdEndRenderPass2KHR"); - vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)load(context, "vkCmdNextSubpass2KHR"); - vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)load(context, "vkCreateRenderPass2KHR"); -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) - vkCreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)load(context, "vkCreateDeferredOperationKHR"); - vkDeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)load(context, "vkDeferredOperationJoinKHR"); - vkDestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)load(context, "vkDestroyDeferredOperationKHR"); - vkGetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)load(context, "vkGetDeferredOperationMaxConcurrencyKHR"); - vkGetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)load(context, "vkGetDeferredOperationResultKHR"); -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) - vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)load(context, "vkCreateDescriptorUpdateTemplateKHR"); - vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)load(context, "vkDestroyDescriptorUpdateTemplateKHR"); - vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)load(context, "vkUpdateDescriptorSetWithTemplateKHR"); -#endif /* 
defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) - vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)load(context, "vkCmdDispatchBaseKHR"); - vkCmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)load(context, "vkCmdSetDeviceMaskKHR"); - vkGetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)load(context, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_display_swapchain) - vkCreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)load(context, "vkCreateSharedSwapchainsKHR"); -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) - vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR"); - vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR"); -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) - vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR"); - vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR"); -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_external_fence_fd) - vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR"); - vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR"); -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) - vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)load(context, "vkGetFenceWin32HandleKHR"); - vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)load(context, "vkImportFenceWin32HandleKHR"); -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_fd) - vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)load(context, "vkGetMemoryFdKHR"); - vkGetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)load(context, "vkGetMemoryFdPropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) - vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)load(context, "vkGetMemoryWin32HandleKHR"); - vkGetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)load(context, "vkGetMemoryWin32HandlePropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_fd) - vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)load(context, "vkGetSemaphoreFdKHR"); - vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)load(context, "vkImportSemaphoreFdKHR"); -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) - vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)load(context, "vkGetSemaphoreWin32HandleKHR"); - vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)load(context, "vkImportSemaphoreWin32HandleKHR"); -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) - vkCmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)load(context, "vkCmdSetFragmentShadingRateKHR"); -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_memory_requirements2) - vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)load(context, "vkGetBufferMemoryRequirements2KHR"); - vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)load(context, "vkGetImageMemoryRequirements2KHR"); - 
vkGetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)load(context, "vkGetImageSparseMemoryRequirements2KHR"); -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_maintenance1) - vkTrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)load(context, "vkTrimCommandPoolKHR"); -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) - vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR"); -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) - vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR"); - vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR"); - vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR"); -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) - vkCmdBindIndexBuffer2KHR = (PFN_vkCmdBindIndexBuffer2KHR)load(context, "vkCmdBindIndexBuffer2KHR"); - vkGetDeviceImageSubresourceLayoutKHR = (PFN_vkGetDeviceImageSubresourceLayoutKHR)load(context, "vkGetDeviceImageSubresourceLayoutKHR"); - vkGetImageSubresourceLayout2KHR = (PFN_vkGetImageSubresourceLayout2KHR)load(context, "vkGetImageSubresourceLayout2KHR"); - vkGetRenderingAreaGranularityKHR = (PFN_vkGetRenderingAreaGranularityKHR)load(context, "vkGetRenderingAreaGranularityKHR"); -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_map_memory2) - vkMapMemory2KHR = (PFN_vkMapMemory2KHR)load(context, "vkMapMemory2KHR"); - vkUnmapMemory2KHR = (PFN_vkUnmapMemory2KHR)load(context, "vkUnmapMemory2KHR"); -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) - vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR"); - vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR"); -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_executable_properties) - vkGetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)load(context, "vkGetPipelineExecutableInternalRepresentationsKHR"); - vkGetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)load(context, "vkGetPipelineExecutablePropertiesKHR"); - vkGetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)load(context, "vkGetPipelineExecutableStatisticsKHR"); -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) - vkWaitForPresentKHR = (PFN_vkWaitForPresentKHR)load(context, "vkWaitForPresentKHR"); -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) - vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)load(context, "vkCmdPushDescriptorSetKHR"); -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) - vkCmdTraceRaysIndirect2KHR = (PFN_vkCmdTraceRaysIndirect2KHR)load(context, "vkCmdTraceRaysIndirect2KHR"); -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) - vkCmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)load(context, 
"vkCmdSetRayTracingPipelineStackSizeKHR"); - vkCmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)load(context, "vkCmdTraceRaysIndirectKHR"); - vkCmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)load(context, "vkCmdTraceRaysKHR"); - vkCreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)load(context, "vkCreateRayTracingPipelinesKHR"); - vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)load(context, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); - vkGetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)load(context, "vkGetRayTracingShaderGroupHandlesKHR"); - vkGetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)load(context, "vkGetRayTracingShaderGroupStackSizeKHR"); -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) - vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)load(context, "vkCreateSamplerYcbcrConversionKHR"); - vkDestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)load(context, "vkDestroySamplerYcbcrConversionKHR"); -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) - vkGetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)load(context, "vkGetSwapchainStatusKHR"); -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_swapchain) - vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)load(context, "vkAcquireNextImageKHR"); - vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)load(context, "vkCreateSwapchainKHR"); - vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)load(context, "vkDestroySwapchainKHR"); - vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)load(context, "vkGetSwapchainImagesKHR"); - vkQueuePresentKHR = (PFN_vkQueuePresentKHR)load(context, "vkQueuePresentKHR"); -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) - vkCmdPipelineBarrier2KHR = (PFN_vkCmdPipelineBarrier2KHR)load(context, "vkCmdPipelineBarrier2KHR"); - vkCmdResetEvent2KHR = (PFN_vkCmdResetEvent2KHR)load(context, "vkCmdResetEvent2KHR"); - vkCmdSetEvent2KHR = (PFN_vkCmdSetEvent2KHR)load(context, "vkCmdSetEvent2KHR"); - vkCmdWaitEvents2KHR = (PFN_vkCmdWaitEvents2KHR)load(context, "vkCmdWaitEvents2KHR"); - vkCmdWriteTimestamp2KHR = (PFN_vkCmdWriteTimestamp2KHR)load(context, "vkCmdWriteTimestamp2KHR"); - vkQueueSubmit2KHR = (PFN_vkQueueSubmit2KHR)load(context, "vkQueueSubmit2KHR"); -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) - vkCmdWriteBufferMarker2AMD = (PFN_vkCmdWriteBufferMarker2AMD)load(context, "vkCmdWriteBufferMarker2AMD"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) - vkGetQueueCheckpointData2NV = (PFN_vkGetQueueCheckpointData2NV)load(context, "vkGetQueueCheckpointData2NV"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) - vkGetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)load(context, "vkGetSemaphoreCounterValueKHR"); - vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)load(context, "vkSignalSemaphoreKHR"); - vkWaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)load(context, "vkWaitSemaphoresKHR"); -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) - 
vkCmdDecodeVideoKHR = (PFN_vkCmdDecodeVideoKHR)load(context, "vkCmdDecodeVideoKHR"); -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) - vkCmdEncodeVideoKHR = (PFN_vkCmdEncodeVideoKHR)load(context, "vkCmdEncodeVideoKHR"); - vkGetEncodedVideoSessionParametersKHR = (PFN_vkGetEncodedVideoSessionParametersKHR)load(context, "vkGetEncodedVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - vkBindVideoSessionMemoryKHR = (PFN_vkBindVideoSessionMemoryKHR)load(context, "vkBindVideoSessionMemoryKHR"); - vkCmdBeginVideoCodingKHR = (PFN_vkCmdBeginVideoCodingKHR)load(context, "vkCmdBeginVideoCodingKHR"); - vkCmdControlVideoCodingKHR = (PFN_vkCmdControlVideoCodingKHR)load(context, "vkCmdControlVideoCodingKHR"); - vkCmdEndVideoCodingKHR = (PFN_vkCmdEndVideoCodingKHR)load(context, "vkCmdEndVideoCodingKHR"); - vkCreateVideoSessionKHR = (PFN_vkCreateVideoSessionKHR)load(context, "vkCreateVideoSessionKHR"); - vkCreateVideoSessionParametersKHR = (PFN_vkCreateVideoSessionParametersKHR)load(context, "vkCreateVideoSessionParametersKHR"); - vkDestroyVideoSessionKHR = (PFN_vkDestroyVideoSessionKHR)load(context, "vkDestroyVideoSessionKHR"); - vkDestroyVideoSessionParametersKHR = (PFN_vkDestroyVideoSessionParametersKHR)load(context, "vkDestroyVideoSessionParametersKHR"); - vkGetVideoSessionMemoryRequirementsKHR = (PFN_vkGetVideoSessionMemoryRequirementsKHR)load(context, "vkGetVideoSessionMemoryRequirementsKHR"); - vkUpdateVideoSessionParametersKHR = (PFN_vkUpdateVideoSessionParametersKHR)load(context, "vkUpdateVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_NVX_binary_import) - vkCmdCuLaunchKernelNVX = (PFN_vkCmdCuLaunchKernelNVX)load(context, "vkCmdCuLaunchKernelNVX"); - vkCreateCuFunctionNVX = (PFN_vkCreateCuFunctionNVX)load(context, "vkCreateCuFunctionNVX"); - vkCreateCuModuleNVX = (PFN_vkCreateCuModuleNVX)load(context, "vkCreateCuModuleNVX"); - vkDestroyCuFunctionNVX = (PFN_vkDestroyCuFunctionNVX)load(context, "vkDestroyCuFunctionNVX"); - vkDestroyCuModuleNVX = (PFN_vkDestroyCuModuleNVX)load(context, "vkDestroyCuModuleNVX"); -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) - vkGetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)load(context, "vkGetImageViewAddressNVX"); - vkGetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)load(context, "vkGetImageViewHandleNVX"); -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_clip_space_w_scaling) - vkCmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)load(context, "vkCmdSetViewportWScalingNV"); -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_copy_memory_indirect) - vkCmdCopyMemoryIndirectNV = (PFN_vkCmdCopyMemoryIndirectNV)load(context, "vkCmdCopyMemoryIndirectNV"); - vkCmdCopyMemoryToImageIndirectNV = (PFN_vkCmdCopyMemoryToImageIndirectNV)load(context, "vkCmdCopyMemoryToImageIndirectNV"); -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_cuda_kernel_launch) - vkCmdCudaLaunchKernelNV = (PFN_vkCmdCudaLaunchKernelNV)load(context, "vkCmdCudaLaunchKernelNV"); - vkCreateCudaFunctionNV = (PFN_vkCreateCudaFunctionNV)load(context, "vkCreateCudaFunctionNV"); - vkCreateCudaModuleNV = (PFN_vkCreateCudaModuleNV)load(context, "vkCreateCudaModuleNV"); - vkDestroyCudaFunctionNV = (PFN_vkDestroyCudaFunctionNV)load(context, "vkDestroyCudaFunctionNV"); - vkDestroyCudaModuleNV = (PFN_vkDestroyCudaModuleNV)load(context, "vkDestroyCudaModuleNV"); - 
vkGetCudaModuleCacheNV = (PFN_vkGetCudaModuleCacheNV)load(context, "vkGetCudaModuleCacheNV"); -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) - vkCmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)load(context, "vkCmdSetCheckpointNV"); - vkGetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)load(context, "vkGetQueueCheckpointDataNV"); -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) - vkCmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)load(context, "vkCmdBindPipelineShaderGroupNV"); - vkCmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)load(context, "vkCmdExecuteGeneratedCommandsNV"); - vkCmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)load(context, "vkCmdPreprocessGeneratedCommandsNV"); - vkCreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)load(context, "vkCreateIndirectCommandsLayoutNV"); - vkDestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)load(context, "vkDestroyIndirectCommandsLayoutNV"); - vkGetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)load(context, "vkGetGeneratedCommandsMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) - vkCmdUpdatePipelineIndirectBufferNV = (PFN_vkCmdUpdatePipelineIndirectBufferNV)load(context, "vkCmdUpdatePipelineIndirectBufferNV"); - vkGetPipelineIndirectDeviceAddressNV = (PFN_vkGetPipelineIndirectDeviceAddressNV)load(context, "vkGetPipelineIndirectDeviceAddressNV"); - vkGetPipelineIndirectMemoryRequirementsNV = (PFN_vkGetPipelineIndirectMemoryRequirementsNV)load(context, "vkGetPipelineIndirectMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_rdma) - vkGetMemoryRemoteAddressNV = (PFN_vkGetMemoryRemoteAddressNV)load(context, "vkGetMemoryRemoteAddressNV"); -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) - vkGetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)load(context, "vkGetMemoryWin32HandleNV"); -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) - vkCmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)load(context, "vkCmdSetFragmentShadingRateEnumNV"); -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) - vkGetLatencyTimingsNV = (PFN_vkGetLatencyTimingsNV)load(context, "vkGetLatencyTimingsNV"); - vkLatencySleepNV = (PFN_vkLatencySleepNV)load(context, "vkLatencySleepNV"); - vkQueueNotifyOutOfBandNV = (PFN_vkQueueNotifyOutOfBandNV)load(context, "vkQueueNotifyOutOfBandNV"); - vkSetLatencyMarkerNV = (PFN_vkSetLatencyMarkerNV)load(context, "vkSetLatencyMarkerNV"); - vkSetLatencySleepModeNV = (PFN_vkSetLatencySleepModeNV)load(context, "vkSetLatencySleepModeNV"); -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) - vkCmdDecompressMemoryIndirectCountNV = (PFN_vkCmdDecompressMemoryIndirectCountNV)load(context, "vkCmdDecompressMemoryIndirectCountNV"); - vkCmdDecompressMemoryNV = (PFN_vkCmdDecompressMemoryNV)load(context, "vkCmdDecompressMemoryNV"); -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) - vkCmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV)load(context, "vkCmdDrawMeshTasksIndirectCountNV"); - 
vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)load(context, "vkCmdDrawMeshTasksIndirectNV"); - vkCmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)load(context, "vkCmdDrawMeshTasksNV"); -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) - vkBindOpticalFlowSessionImageNV = (PFN_vkBindOpticalFlowSessionImageNV)load(context, "vkBindOpticalFlowSessionImageNV"); - vkCmdOpticalFlowExecuteNV = (PFN_vkCmdOpticalFlowExecuteNV)load(context, "vkCmdOpticalFlowExecuteNV"); - vkCreateOpticalFlowSessionNV = (PFN_vkCreateOpticalFlowSessionNV)load(context, "vkCreateOpticalFlowSessionNV"); - vkDestroyOpticalFlowSessionNV = (PFN_vkDestroyOpticalFlowSessionNV)load(context, "vkDestroyOpticalFlowSessionNV"); -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) - vkBindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)load(context, "vkBindAccelerationStructureMemoryNV"); - vkCmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)load(context, "vkCmdBuildAccelerationStructureNV"); - vkCmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)load(context, "vkCmdCopyAccelerationStructureNV"); - vkCmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)load(context, "vkCmdTraceRaysNV"); - vkCmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)load(context, "vkCmdWriteAccelerationStructuresPropertiesNV"); - vkCompileDeferredNV = (PFN_vkCompileDeferredNV)load(context, "vkCompileDeferredNV"); - vkCreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)load(context, "vkCreateAccelerationStructureNV"); - vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)load(context, "vkCreateRayTracingPipelinesNV"); - vkDestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)load(context, "vkDestroyAccelerationStructureNV"); - vkGetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)load(context, "vkGetAccelerationStructureHandleNV"); - vkGetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)load(context, "vkGetAccelerationStructureMemoryRequirementsNV"); - vkGetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)load(context, "vkGetRayTracingShaderGroupHandlesNV"); -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 - vkCmdSetExclusiveScissorEnableNV = (PFN_vkCmdSetExclusiveScissorEnableNV)load(context, "vkCmdSetExclusiveScissorEnableNV"); -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) - vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)load(context, "vkCmdSetExclusiveScissorNV"); -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) - vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)load(context, "vkCmdBindShadingRateImageNV"); - vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)load(context, "vkCmdSetCoarseSampleOrderNV"); - vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)load(context, "vkCmdSetViewportShadingRatePaletteNV"); -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) - vkGetDynamicRenderingTilePropertiesQCOM = (PFN_vkGetDynamicRenderingTilePropertiesQCOM)load(context, "vkGetDynamicRenderingTilePropertiesQCOM"); - vkGetFramebufferTilePropertiesQCOM = 
(PFN_vkGetFramebufferTilePropertiesQCOM)load(context, "vkGetFramebufferTilePropertiesQCOM"); -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) - vkGetScreenBufferPropertiesQNX = (PFN_vkGetScreenBufferPropertiesQNX)load(context, "vkGetScreenBufferPropertiesQNX"); -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) - vkGetDescriptorSetHostMappingVALVE = (PFN_vkGetDescriptorSetHostMappingVALVE)load(context, "vkGetDescriptorSetHostMappingVALVE"); - vkGetDescriptorSetLayoutHostMappingInfoVALVE = (PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)load(context, "vkGetDescriptorSetLayoutHostMappingInfoVALVE"); -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) - vkCmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)load(context, "vkCmdBindVertexBuffers2EXT"); - vkCmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)load(context, "vkCmdSetCullModeEXT"); - vkCmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)load(context, "vkCmdSetDepthBoundsTestEnableEXT"); - vkCmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)load(context, "vkCmdSetDepthCompareOpEXT"); - vkCmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)load(context, "vkCmdSetDepthTestEnableEXT"); - vkCmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT)load(context, "vkCmdSetDepthWriteEnableEXT"); - vkCmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)load(context, "vkCmdSetFrontFaceEXT"); - vkCmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)load(context, "vkCmdSetPrimitiveTopologyEXT"); - vkCmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT)load(context, "vkCmdSetScissorWithCountEXT"); - vkCmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)load(context, "vkCmdSetStencilOpEXT"); - vkCmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)load(context, "vkCmdSetStencilTestEnableEXT"); - vkCmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)load(context, "vkCmdSetViewportWithCountEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) - vkCmdSetDepthBiasEnableEXT = (PFN_vkCmdSetDepthBiasEnableEXT)load(context, "vkCmdSetDepthBiasEnableEXT"); - vkCmdSetLogicOpEXT = (PFN_vkCmdSetLogicOpEXT)load(context, "vkCmdSetLogicOpEXT"); - vkCmdSetPatchControlPointsEXT = (PFN_vkCmdSetPatchControlPointsEXT)load(context, "vkCmdSetPatchControlPointsEXT"); - vkCmdSetPrimitiveRestartEnableEXT = (PFN_vkCmdSetPrimitiveRestartEnableEXT)load(context, "vkCmdSetPrimitiveRestartEnableEXT"); - vkCmdSetRasterizerDiscardEnableEXT = (PFN_vkCmdSetRasterizerDiscardEnableEXT)load(context, "vkCmdSetRasterizerDiscardEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) - vkCmdSetAlphaToCoverageEnableEXT = (PFN_vkCmdSetAlphaToCoverageEnableEXT)load(context, "vkCmdSetAlphaToCoverageEnableEXT"); - vkCmdSetAlphaToOneEnableEXT = (PFN_vkCmdSetAlphaToOneEnableEXT)load(context, "vkCmdSetAlphaToOneEnableEXT"); - vkCmdSetColorBlendAdvancedEXT = (PFN_vkCmdSetColorBlendAdvancedEXT)load(context, "vkCmdSetColorBlendAdvancedEXT"); - vkCmdSetColorBlendEnableEXT = (PFN_vkCmdSetColorBlendEnableEXT)load(context, "vkCmdSetColorBlendEnableEXT"); - vkCmdSetColorBlendEquationEXT = 
(PFN_vkCmdSetColorBlendEquationEXT)load(context, "vkCmdSetColorBlendEquationEXT"); - vkCmdSetColorWriteMaskEXT = (PFN_vkCmdSetColorWriteMaskEXT)load(context, "vkCmdSetColorWriteMaskEXT"); - vkCmdSetConservativeRasterizationModeEXT = (PFN_vkCmdSetConservativeRasterizationModeEXT)load(context, "vkCmdSetConservativeRasterizationModeEXT"); - vkCmdSetDepthClampEnableEXT = (PFN_vkCmdSetDepthClampEnableEXT)load(context, "vkCmdSetDepthClampEnableEXT"); - vkCmdSetDepthClipEnableEXT = (PFN_vkCmdSetDepthClipEnableEXT)load(context, "vkCmdSetDepthClipEnableEXT"); - vkCmdSetDepthClipNegativeOneToOneEXT = (PFN_vkCmdSetDepthClipNegativeOneToOneEXT)load(context, "vkCmdSetDepthClipNegativeOneToOneEXT"); - vkCmdSetExtraPrimitiveOverestimationSizeEXT = (PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT)load(context, "vkCmdSetExtraPrimitiveOverestimationSizeEXT"); - vkCmdSetLineRasterizationModeEXT = (PFN_vkCmdSetLineRasterizationModeEXT)load(context, "vkCmdSetLineRasterizationModeEXT"); - vkCmdSetLineStippleEnableEXT = (PFN_vkCmdSetLineStippleEnableEXT)load(context, "vkCmdSetLineStippleEnableEXT"); - vkCmdSetLogicOpEnableEXT = (PFN_vkCmdSetLogicOpEnableEXT)load(context, "vkCmdSetLogicOpEnableEXT"); - vkCmdSetPolygonModeEXT = (PFN_vkCmdSetPolygonModeEXT)load(context, "vkCmdSetPolygonModeEXT"); - vkCmdSetProvokingVertexModeEXT = (PFN_vkCmdSetProvokingVertexModeEXT)load(context, "vkCmdSetProvokingVertexModeEXT"); - vkCmdSetRasterizationSamplesEXT = (PFN_vkCmdSetRasterizationSamplesEXT)load(context, "vkCmdSetRasterizationSamplesEXT"); - vkCmdSetRasterizationStreamEXT = (PFN_vkCmdSetRasterizationStreamEXT)load(context, "vkCmdSetRasterizationStreamEXT"); - vkCmdSetSampleLocationsEnableEXT = (PFN_vkCmdSetSampleLocationsEnableEXT)load(context, "vkCmdSetSampleLocationsEnableEXT"); - vkCmdSetSampleMaskEXT = (PFN_vkCmdSetSampleMaskEXT)load(context, "vkCmdSetSampleMaskEXT"); - vkCmdSetTessellationDomainOriginEXT = (PFN_vkCmdSetTessellationDomainOriginEXT)load(context, "vkCmdSetTessellationDomainOriginEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) - vkCmdSetViewportWScalingEnableNV = (PFN_vkCmdSetViewportWScalingEnableNV)load(context, "vkCmdSetViewportWScalingEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) - vkCmdSetViewportSwizzleNV = (PFN_vkCmdSetViewportSwizzleNV)load(context, "vkCmdSetViewportSwizzleNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) - vkCmdSetCoverageToColorEnableNV = (PFN_vkCmdSetCoverageToColorEnableNV)load(context, "vkCmdSetCoverageToColorEnableNV"); - vkCmdSetCoverageToColorLocationNV = (PFN_vkCmdSetCoverageToColorLocationNV)load(context, "vkCmdSetCoverageToColorLocationNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) - vkCmdSetCoverageModulationModeNV = (PFN_vkCmdSetCoverageModulationModeNV)load(context, "vkCmdSetCoverageModulationModeNV"); - vkCmdSetCoverageModulationTableEnableNV = (PFN_vkCmdSetCoverageModulationTableEnableNV)load(context, "vkCmdSetCoverageModulationTableEnableNV"); - vkCmdSetCoverageModulationTableNV = (PFN_vkCmdSetCoverageModulationTableNV)load(context, "vkCmdSetCoverageModulationTableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) - vkCmdSetShadingRateImageEnableNV = (PFN_vkCmdSetShadingRateImageEnableNV)load(context, "vkCmdSetShadingRateImageEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) - vkCmdSetRepresentativeFragmentTestEnableNV = (PFN_vkCmdSetRepresentativeFragmentTestEnableNV)load(context, "vkCmdSetRepresentativeFragmentTestEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) - vkCmdSetCoverageReductionModeNV = (PFN_vkCmdSetCoverageReductionModeNV)load(context, "vkCmdSetCoverageReductionModeNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) - vkGetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)load(context, "vkGetDeviceGroupSurfacePresentModes2EXT"); -#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) - vkGetImageSubresourceLayout2EXT = (PFN_vkGetImageSubresourceLayout2EXT)load(context, "vkGetImageSubresourceLayout2EXT"); -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) - vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT"); -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) - vkCmdPushDescriptorSetWithTemplateKHR = 
(PFN_vkCmdPushDescriptorSetWithTemplateKHR)load(context, "vkCmdPushDescriptorSetWithTemplateKHR");
-#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
-	vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)load(context, "vkGetDeviceGroupPresentCapabilitiesKHR");
-	vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)load(context, "vkGetDeviceGroupSurfacePresentModesKHR");
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
-	vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)load(context, "vkAcquireNextImage2KHR");
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-	/* VOLK_GENERATE_LOAD_DEVICE */
-}
-
-static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, PFN_vkVoidFunction (*load)(void*, const char*))
-{
-	/* VOLK_GENERATE_LOAD_DEVICE_TABLE */
-#if defined(VK_VERSION_1_0)
-	table->vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)load(context, "vkAllocateCommandBuffers");
-	table->vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)load(context, "vkAllocateDescriptorSets");
-	table->vkAllocateMemory = (PFN_vkAllocateMemory)load(context, "vkAllocateMemory");
-	table->vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)load(context, "vkBeginCommandBuffer");
-	table->vkBindBufferMemory = (PFN_vkBindBufferMemory)load(context, "vkBindBufferMemory");
-	table->vkBindImageMemory = (PFN_vkBindImageMemory)load(context, "vkBindImageMemory");
-	table->vkCmdBeginQuery = (PFN_vkCmdBeginQuery)load(context, "vkCmdBeginQuery");
-	table->vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)load(context, "vkCmdBeginRenderPass");
-	table->vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)load(context, "vkCmdBindDescriptorSets");
-	table->vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)load(context, "vkCmdBindIndexBuffer");
-	table->vkCmdBindPipeline = (PFN_vkCmdBindPipeline)load(context, "vkCmdBindPipeline");
-	table->vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)load(context, "vkCmdBindVertexBuffers");
-	table->vkCmdBlitImage = (PFN_vkCmdBlitImage)load(context, "vkCmdBlitImage");
-	table->vkCmdClearAttachments = (PFN_vkCmdClearAttachments)load(context, "vkCmdClearAttachments");
-	table->vkCmdClearColorImage = (PFN_vkCmdClearColorImage)load(context, "vkCmdClearColorImage");
-	table->vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)load(context, "vkCmdClearDepthStencilImage");
-	table->vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)load(context, "vkCmdCopyBuffer");
-	table->vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)load(context, "vkCmdCopyBufferToImage");
-	table->vkCmdCopyImage = (PFN_vkCmdCopyImage)load(context, "vkCmdCopyImage");
-	table->vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)load(context, "vkCmdCopyImageToBuffer");
-	table->vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)load(context, "vkCmdCopyQueryPoolResults");
-	table->vkCmdDispatch = (PFN_vkCmdDispatch)load(context, "vkCmdDispatch");
-	table->vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)load(context, "vkCmdDispatchIndirect");
-	table->vkCmdDraw = (PFN_vkCmdDraw)load(context, "vkCmdDraw");
-	table->vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)load(context, "vkCmdDrawIndexed");
-	table->vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)load(context, "vkCmdDrawIndexedIndirect");
-	table->vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)load(context, "vkCmdDrawIndirect");
-	table->vkCmdEndQuery = (PFN_vkCmdEndQuery)load(context, "vkCmdEndQuery");
-	table->vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)load(context, "vkCmdEndRenderPass");
-	table->vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)load(context, "vkCmdExecuteCommands");
-	table->vkCmdFillBuffer = (PFN_vkCmdFillBuffer)load(context, "vkCmdFillBuffer");
-	table->vkCmdNextSubpass = (PFN_vkCmdNextSubpass)load(context, "vkCmdNextSubpass");
-	table->vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)load(context, "vkCmdPipelineBarrier");
-	table->vkCmdPushConstants = (PFN_vkCmdPushConstants)load(context, "vkCmdPushConstants");
-	table->vkCmdResetEvent = (PFN_vkCmdResetEvent)load(context, "vkCmdResetEvent");
-	table->vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)load(context, "vkCmdResetQueryPool");
-	table->vkCmdResolveImage = (PFN_vkCmdResolveImage)load(context, "vkCmdResolveImage");
-	table->vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)load(context, "vkCmdSetBlendConstants");
-	table->vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)load(context, "vkCmdSetDepthBias");
-	table->vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)load(context, "vkCmdSetDepthBounds");
-	table->vkCmdSetEvent = (PFN_vkCmdSetEvent)load(context, "vkCmdSetEvent");
-	table->vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)load(context, "vkCmdSetLineWidth");
-	table->vkCmdSetScissor = (PFN_vkCmdSetScissor)load(context, "vkCmdSetScissor");
-	table->vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)load(context, "vkCmdSetStencilCompareMask");
-	table->vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)load(context, "vkCmdSetStencilReference");
-	table->vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)load(context, "vkCmdSetStencilWriteMask");
-	table->vkCmdSetViewport = (PFN_vkCmdSetViewport)load(context, "vkCmdSetViewport");
-	table->vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)load(context, "vkCmdUpdateBuffer");
-	table->vkCmdWaitEvents = (PFN_vkCmdWaitEvents)load(context, "vkCmdWaitEvents");
-	table->vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)load(context, "vkCmdWriteTimestamp");
-	table->vkCreateBuffer = (PFN_vkCreateBuffer)load(context, "vkCreateBuffer");
-	table->vkCreateBufferView = (PFN_vkCreateBufferView)load(context, "vkCreateBufferView");
-	table->vkCreateCommandPool = (PFN_vkCreateCommandPool)load(context, "vkCreateCommandPool");
-	table->vkCreateComputePipelines = (PFN_vkCreateComputePipelines)load(context, "vkCreateComputePipelines");
-	table->vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)load(context, "vkCreateDescriptorPool");
-	table->vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)load(context, "vkCreateDescriptorSetLayout");
-	table->vkCreateEvent = (PFN_vkCreateEvent)load(context, "vkCreateEvent");
-	table->vkCreateFence = (PFN_vkCreateFence)load(context, "vkCreateFence");
-	table->vkCreateFramebuffer = (PFN_vkCreateFramebuffer)load(context, "vkCreateFramebuffer");
-	table->vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)load(context, "vkCreateGraphicsPipelines");
-	table->vkCreateImage = (PFN_vkCreateImage)load(context, "vkCreateImage");
-	table->vkCreateImageView = (PFN_vkCreateImageView)load(context, "vkCreateImageView");
-	table->vkCreatePipelineCache = (PFN_vkCreatePipelineCache)load(context, "vkCreatePipelineCache");
-	table->vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)load(context, "vkCreatePipelineLayout");
-	table->vkCreateQueryPool = (PFN_vkCreateQueryPool)load(context, "vkCreateQueryPool");
-	table->vkCreateRenderPass = (PFN_vkCreateRenderPass)load(context, "vkCreateRenderPass");
-	table->vkCreateSampler = (PFN_vkCreateSampler)load(context, "vkCreateSampler");
-	table->vkCreateSemaphore = (PFN_vkCreateSemaphore)load(context, "vkCreateSemaphore");
-	table->vkCreateShaderModule = (PFN_vkCreateShaderModule)load(context, "vkCreateShaderModule");
-	table->vkDestroyBuffer = (PFN_vkDestroyBuffer)load(context, "vkDestroyBuffer");
-	table->vkDestroyBufferView = (PFN_vkDestroyBufferView)load(context, "vkDestroyBufferView");
-	table->vkDestroyCommandPool = (PFN_vkDestroyCommandPool)load(context, "vkDestroyCommandPool");
-	table->vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)load(context, "vkDestroyDescriptorPool");
-	table->vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)load(context, "vkDestroyDescriptorSetLayout");
-	table->vkDestroyDevice = (PFN_vkDestroyDevice)load(context, "vkDestroyDevice");
-	table->vkDestroyEvent = (PFN_vkDestroyEvent)load(context, "vkDestroyEvent");
-	table->vkDestroyFence = (PFN_vkDestroyFence)load(context, "vkDestroyFence");
-	table->vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)load(context, "vkDestroyFramebuffer");
-	table->vkDestroyImage = (PFN_vkDestroyImage)load(context, "vkDestroyImage");
-	table->vkDestroyImageView = (PFN_vkDestroyImageView)load(context, "vkDestroyImageView");
-	table->vkDestroyPipeline = (PFN_vkDestroyPipeline)load(context, "vkDestroyPipeline");
-	table->vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)load(context, "vkDestroyPipelineCache");
-	table->vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)load(context, "vkDestroyPipelineLayout");
-	table->vkDestroyQueryPool = (PFN_vkDestroyQueryPool)load(context, "vkDestroyQueryPool");
-	table->vkDestroyRenderPass = (PFN_vkDestroyRenderPass)load(context, "vkDestroyRenderPass");
-	table->vkDestroySampler = (PFN_vkDestroySampler)load(context, "vkDestroySampler");
-	table->vkDestroySemaphore = (PFN_vkDestroySemaphore)load(context, "vkDestroySemaphore");
-	table->vkDestroyShaderModule = (PFN_vkDestroyShaderModule)load(context, "vkDestroyShaderModule");
-	table->vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)load(context, "vkDeviceWaitIdle");
-	table->vkEndCommandBuffer = (PFN_vkEndCommandBuffer)load(context, "vkEndCommandBuffer");
-	table->vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)load(context, "vkFlushMappedMemoryRanges");
-	table->vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)load(context, "vkFreeCommandBuffers");
-	table->vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)load(context, "vkFreeDescriptorSets");
-	table->vkFreeMemory = (PFN_vkFreeMemory)load(context, "vkFreeMemory");
-	table->vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)load(context, "vkGetBufferMemoryRequirements");
-	table->vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)load(context, "vkGetDeviceMemoryCommitment");
-	table->vkGetDeviceQueue = (PFN_vkGetDeviceQueue)load(context, "vkGetDeviceQueue");
-	table->vkGetEventStatus = (PFN_vkGetEventStatus)load(context, "vkGetEventStatus");
-	table->vkGetFenceStatus = (PFN_vkGetFenceStatus)load(context, "vkGetFenceStatus");
-	table->vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)load(context, "vkGetImageMemoryRequirements");
-	table->vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)load(context, "vkGetImageSparseMemoryRequirements");
-	table->vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)load(context, "vkGetImageSubresourceLayout");
-	table->vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)load(context, "vkGetPipelineCacheData");
-	table->vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults)load(context, "vkGetQueryPoolResults");
-	table->vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)load(context, "vkGetRenderAreaGranularity");
-	table->vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)load(context, "vkInvalidateMappedMemoryRanges");
-	table->vkMapMemory = (PFN_vkMapMemory)load(context, "vkMapMemory");
-	table->vkMergePipelineCaches = (PFN_vkMergePipelineCaches)load(context, "vkMergePipelineCaches");
-	table->vkQueueBindSparse = (PFN_vkQueueBindSparse)load(context, "vkQueueBindSparse");
-	table->vkQueueSubmit = (PFN_vkQueueSubmit)load(context, "vkQueueSubmit");
-	table->vkQueueWaitIdle = (PFN_vkQueueWaitIdle)load(context, "vkQueueWaitIdle");
-	table->vkResetCommandBuffer = (PFN_vkResetCommandBuffer)load(context, "vkResetCommandBuffer");
-	table->vkResetCommandPool = (PFN_vkResetCommandPool)load(context, "vkResetCommandPool");
-	table->vkResetDescriptorPool = (PFN_vkResetDescriptorPool)load(context, "vkResetDescriptorPool");
-	table->vkResetEvent = (PFN_vkResetEvent)load(context, "vkResetEvent");
-	table->vkResetFences = (PFN_vkResetFences)load(context, "vkResetFences");
-	table->vkSetEvent = (PFN_vkSetEvent)load(context, "vkSetEvent");
-	table->vkUnmapMemory = (PFN_vkUnmapMemory)load(context, "vkUnmapMemory");
-	table->vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)load(context, "vkUpdateDescriptorSets");
-	table->vkWaitForFences = (PFN_vkWaitForFences)load(context, "vkWaitForFences");
-#endif /* defined(VK_VERSION_1_0) */
-#if defined(VK_VERSION_1_1)
-	table->vkBindBufferMemory2 = (PFN_vkBindBufferMemory2)load(context, "vkBindBufferMemory2");
-	table->vkBindImageMemory2 = (PFN_vkBindImageMemory2)load(context, "vkBindImageMemory2");
-	table->vkCmdDispatchBase = (PFN_vkCmdDispatchBase)load(context, "vkCmdDispatchBase");
-	table->vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask)load(context, "vkCmdSetDeviceMask");
-	table->vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate)load(context, "vkCreateDescriptorUpdateTemplate");
-	table->vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion)load(context, "vkCreateSamplerYcbcrConversion");
-	table->vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate)load(context, "vkDestroyDescriptorUpdateTemplate");
-	table->vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion)load(context, "vkDestroySamplerYcbcrConversion");
-	table->vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2)load(context, "vkGetBufferMemoryRequirements2");
-	table->vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport)load(context, "vkGetDescriptorSetLayoutSupport");
-	table->vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures)load(context, "vkGetDeviceGroupPeerMemoryFeatures");
-	table->vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2)load(context, "vkGetDeviceQueue2");
-	table->vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2)load(context, "vkGetImageMemoryRequirements2");
-	table->vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2)load(context, "vkGetImageSparseMemoryRequirements2");
-	table->vkTrimCommandPool = (PFN_vkTrimCommandPool)load(context, "vkTrimCommandPool");
-	table->vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate)load(context, "vkUpdateDescriptorSetWithTemplate");
-#endif /* defined(VK_VERSION_1_1) */
-#if defined(VK_VERSION_1_2)
-	table->vkCmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)load(context, "vkCmdBeginRenderPass2");
-	table->vkCmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)load(context, "vkCmdDrawIndexedIndirectCount");
-	table->vkCmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)load(context, "vkCmdDrawIndirectCount");
-	table->vkCmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2)load(context, "vkCmdEndRenderPass2");
-	table->vkCmdNextSubpass2 = (PFN_vkCmdNextSubpass2)load(context, "vkCmdNextSubpass2");
-	table->vkCreateRenderPass2 = (PFN_vkCreateRenderPass2)load(context, "vkCreateRenderPass2");
-	table->vkGetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)load(context, "vkGetBufferDeviceAddress");
-	table->vkGetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress)load(context, "vkGetBufferOpaqueCaptureAddress");
-	table->vkGetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)load(context, "vkGetDeviceMemoryOpaqueCaptureAddress");
-	table->vkGetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)load(context, "vkGetSemaphoreCounterValue");
-	table->vkResetQueryPool = (PFN_vkResetQueryPool)load(context, "vkResetQueryPool");
-	table->vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore");
-	table->vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, "vkWaitSemaphores");
-#endif /* defined(VK_VERSION_1_2) */
-#if defined(VK_VERSION_1_3)
-	table->vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering");
-	table->vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2");
-	table->vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2");
-	table->vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2");
-	table->vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2");
-	table->vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2");
-	table->vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2");
-	table->vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering");
-	table->vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2");
-	table->vkCmdResetEvent2 = (PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2");
-	table->vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2");
-	table->vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode");
-	table->vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable");
-	table->vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable");
-	table->vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp");
-	table->vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable");
-	table->vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable");
-	table->vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2");
-	table->vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace");
-	table->vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable");
-	table->vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology");
-	table->vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable");
-	table->vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount");
-	table->vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp");
-	table->vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable");
-	table->vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount");
-	table->vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2");
-	table->vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2");
-	table->vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot");
-	table->vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot");
-	table->vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements");
-	table->vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements");
-	table->vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, "vkGetDeviceImageSparseMemoryRequirements");
-	table->vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData");
-	table->vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2");
-	table->vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData");
-#endif /* defined(VK_VERSION_1_3) */
-#if defined(VK_AMDX_shader_enqueue)
-	table->vkCmdDispatchGraphAMDX = (PFN_vkCmdDispatchGraphAMDX)load(context, "vkCmdDispatchGraphAMDX");
-	table->vkCmdDispatchGraphIndirectAMDX = (PFN_vkCmdDispatchGraphIndirectAMDX)load(context, "vkCmdDispatchGraphIndirectAMDX");
-	table->vkCmdDispatchGraphIndirectCountAMDX = (PFN_vkCmdDispatchGraphIndirectCountAMDX)load(context, "vkCmdDispatchGraphIndirectCountAMDX");
-	table->vkCmdInitializeGraphScratchMemoryAMDX = (PFN_vkCmdInitializeGraphScratchMemoryAMDX)load(context, "vkCmdInitializeGraphScratchMemoryAMDX");
-	table->vkCreateExecutionGraphPipelinesAMDX = (PFN_vkCreateExecutionGraphPipelinesAMDX)load(context, "vkCreateExecutionGraphPipelinesAMDX");
-	table->vkGetExecutionGraphPipelineNodeIndexAMDX = (PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)load(context, "vkGetExecutionGraphPipelineNodeIndexAMDX");
-	table->vkGetExecutionGraphPipelineScratchSizeAMDX = (PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)load(context, "vkGetExecutionGraphPipelineScratchSizeAMDX");
-#endif /* defined(VK_AMDX_shader_enqueue) */
-#if defined(VK_AMD_buffer_marker)
-	table->vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD");
-#endif /* defined(VK_AMD_buffer_marker) */
-#if defined(VK_AMD_display_native_hdr)
-	table->vkSetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)load(context, "vkSetLocalDimmingAMD");
-#endif /* defined(VK_AMD_display_native_hdr) */
-#if defined(VK_AMD_draw_indirect_count)
-	table->vkCmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)load(context, "vkCmdDrawIndexedIndirectCountAMD");
-	table->vkCmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)load(context, "vkCmdDrawIndirectCountAMD");
-#endif /* defined(VK_AMD_draw_indirect_count) */
-#if defined(VK_AMD_shader_info)
-	table->vkGetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)load(context, "vkGetShaderInfoAMD");
-#endif /* defined(VK_AMD_shader_info) */
-#if defined(VK_ANDROID_external_memory_android_hardware_buffer)
-	table->vkGetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)load(context, "vkGetAndroidHardwareBufferPropertiesANDROID");
-	table->vkGetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)load(context, "vkGetMemoryAndroidHardwareBufferANDROID");
-#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */
-#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)
-	table->vkCmdSetAttachmentFeedbackLoopEnableEXT = (PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)load(context, "vkCmdSetAttachmentFeedbackLoopEnableEXT");
-#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */
-#if defined(VK_EXT_buffer_device_address)
-	table->vkGetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)load(context, "vkGetBufferDeviceAddressEXT");
-#endif /* defined(VK_EXT_buffer_device_address) */
-#if defined(VK_EXT_calibrated_timestamps)
-	table->vkGetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)load(context, "vkGetCalibratedTimestampsEXT");
-#endif /* defined(VK_EXT_calibrated_timestamps) */
-#if defined(VK_EXT_color_write_enable)
-	table->vkCmdSetColorWriteEnableEXT = (PFN_vkCmdSetColorWriteEnableEXT)load(context, "vkCmdSetColorWriteEnableEXT");
-#endif /* defined(VK_EXT_color_write_enable) */
-#if defined(VK_EXT_conditional_rendering)
-	table->vkCmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)load(context, "vkCmdBeginConditionalRenderingEXT");
-	table->vkCmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)load(context, "vkCmdEndConditionalRenderingEXT");
-#endif /* defined(VK_EXT_conditional_rendering) */
-#if defined(VK_EXT_debug_marker)
-	table->vkCmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)load(context, "vkCmdDebugMarkerBeginEXT");
-	table->vkCmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)load(context, "vkCmdDebugMarkerEndEXT");
-	table->vkCmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)load(context, "vkCmdDebugMarkerInsertEXT");
-	table->vkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)load(context, "vkDebugMarkerSetObjectNameEXT");
-	table->vkDebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)load(context, "vkDebugMarkerSetObjectTagEXT");
-#endif /* defined(VK_EXT_debug_marker) */
-#if defined(VK_EXT_depth_bias_control)
-	table->vkCmdSetDepthBias2EXT = (PFN_vkCmdSetDepthBias2EXT)load(context, "vkCmdSetDepthBias2EXT");
-#endif /* defined(VK_EXT_depth_bias_control) */
-#if defined(VK_EXT_descriptor_buffer)
-	table->vkCmdBindDescriptorBufferEmbeddedSamplersEXT = (PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplersEXT");
-	table->vkCmdBindDescriptorBuffersEXT = (PFN_vkCmdBindDescriptorBuffersEXT)load(context, "vkCmdBindDescriptorBuffersEXT");
-	table->vkCmdSetDescriptorBufferOffsetsEXT = (PFN_vkCmdSetDescriptorBufferOffsetsEXT)load(context, "vkCmdSetDescriptorBufferOffsetsEXT");
-	table->vkGetBufferOpaqueCaptureDescriptorDataEXT = (PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT)load(context, "vkGetBufferOpaqueCaptureDescriptorDataEXT");
-	table->vkGetDescriptorEXT = (PFN_vkGetDescriptorEXT)load(context, "vkGetDescriptorEXT");
-	table->vkGetDescriptorSetLayoutBindingOffsetEXT = (PFN_vkGetDescriptorSetLayoutBindingOffsetEXT)load(context, "vkGetDescriptorSetLayoutBindingOffsetEXT");
-	table->vkGetDescriptorSetLayoutSizeEXT = (PFN_vkGetDescriptorSetLayoutSizeEXT)load(context, "vkGetDescriptorSetLayoutSizeEXT");
-	table->vkGetImageOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageOpaqueCaptureDescriptorDataEXT");
-	table->vkGetImageViewOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageViewOpaqueCaptureDescriptorDataEXT");
-	table->vkGetSamplerOpaqueCaptureDescriptorDataEXT = (PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT)load(context, "vkGetSamplerOpaqueCaptureDescriptorDataEXT");
-#endif /* defined(VK_EXT_descriptor_buffer) */
-#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))
-	table->vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = (PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT)load(context, "vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT");
-#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */
-#if defined(VK_EXT_device_fault)
-	table->vkGetDeviceFaultInfoEXT = (PFN_vkGetDeviceFaultInfoEXT)load(context, "vkGetDeviceFaultInfoEXT");
-#endif /* defined(VK_EXT_device_fault) */
-#if defined(VK_EXT_discard_rectangles)
-	table->vkCmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)load(context, "vkCmdSetDiscardRectangleEXT");
-#endif /* defined(VK_EXT_discard_rectangles) */
-#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2
-	table->vkCmdSetDiscardRectangleEnableEXT = (PFN_vkCmdSetDiscardRectangleEnableEXT)load(context, "vkCmdSetDiscardRectangleEnableEXT");
-	table->vkCmdSetDiscardRectangleModeEXT = (PFN_vkCmdSetDiscardRectangleModeEXT)load(context, "vkCmdSetDiscardRectangleModeEXT");
-#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */
-#if defined(VK_EXT_display_control)
-	table->vkDisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)load(context, "vkDisplayPowerControlEXT");
-	table->vkGetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)load(context, "vkGetSwapchainCounterEXT");
-	table->vkRegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)load(context, "vkRegisterDeviceEventEXT");
-	table->vkRegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)load(context, "vkRegisterDisplayEventEXT");
-#endif /* defined(VK_EXT_display_control) */
-#if defined(VK_EXT_external_memory_host)
-	table->vkGetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)load(context, "vkGetMemoryHostPointerPropertiesEXT");
-#endif /* defined(VK_EXT_external_memory_host) */
-#if defined(VK_EXT_full_screen_exclusive)
-	table->vkAcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)load(context, "vkAcquireFullScreenExclusiveModeEXT");
-	table->vkReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)load(context, "vkReleaseFullScreenExclusiveModeEXT");
-#endif /* defined(VK_EXT_full_screen_exclusive) */
-#if defined(VK_EXT_hdr_metadata)
-	table->vkSetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)load(context, "vkSetHdrMetadataEXT");
-#endif /* defined(VK_EXT_hdr_metadata) */
-#if defined(VK_EXT_host_image_copy)
-	table->vkCopyImageToImageEXT = (PFN_vkCopyImageToImageEXT)load(context, "vkCopyImageToImageEXT");
-	table->vkCopyImageToMemoryEXT = (PFN_vkCopyImageToMemoryEXT)load(context, "vkCopyImageToMemoryEXT");
-	table->vkCopyMemoryToImageEXT = (PFN_vkCopyMemoryToImageEXT)load(context, "vkCopyMemoryToImageEXT");
-	table->vkTransitionImageLayoutEXT = (PFN_vkTransitionImageLayoutEXT)load(context, "vkTransitionImageLayoutEXT");
-#endif /* defined(VK_EXT_host_image_copy) */
-#if defined(VK_EXT_host_query_reset)
-	table->vkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)load(context, "vkResetQueryPoolEXT");
-#endif /* defined(VK_EXT_host_query_reset) */
-#if defined(VK_EXT_image_drm_format_modifier)
-	table->vkGetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)load(context, "vkGetImageDrmFormatModifierPropertiesEXT");
-#endif /* defined(VK_EXT_image_drm_format_modifier) */
-#if defined(VK_EXT_line_rasterization)
-	table->vkCmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)load(context, "vkCmdSetLineStippleEXT");
-#endif /* defined(VK_EXT_line_rasterization) */
-#if defined(VK_EXT_mesh_shader)
-	table->vkCmdDrawMeshTasksEXT = (PFN_vkCmdDrawMeshTasksEXT)load(context, "vkCmdDrawMeshTasksEXT");
-	table->vkCmdDrawMeshTasksIndirectCountEXT = (PFN_vkCmdDrawMeshTasksIndirectCountEXT)load(context, "vkCmdDrawMeshTasksIndirectCountEXT");
-	table->vkCmdDrawMeshTasksIndirectEXT = (PFN_vkCmdDrawMeshTasksIndirectEXT)load(context, "vkCmdDrawMeshTasksIndirectEXT");
-#endif /* defined(VK_EXT_mesh_shader) */
-#if defined(VK_EXT_metal_objects)
-	table->vkExportMetalObjectsEXT = (PFN_vkExportMetalObjectsEXT)load(context, "vkExportMetalObjectsEXT");
-#endif /* defined(VK_EXT_metal_objects) */
-#if defined(VK_EXT_multi_draw)
-	table->vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT");
-	table->vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT");
-#endif /* defined(VK_EXT_multi_draw) */
-#if defined(VK_EXT_opacity_micromap)
-	table->vkBuildMicromapsEXT = (PFN_vkBuildMicromapsEXT)load(context, "vkBuildMicromapsEXT");
-	table->vkCmdBuildMicromapsEXT = (PFN_vkCmdBuildMicromapsEXT)load(context, "vkCmdBuildMicromapsEXT");
-	table->vkCmdCopyMemoryToMicromapEXT = (PFN_vkCmdCopyMemoryToMicromapEXT)load(context, "vkCmdCopyMemoryToMicromapEXT");
-	table->vkCmdCopyMicromapEXT = (PFN_vkCmdCopyMicromapEXT)load(context, "vkCmdCopyMicromapEXT");
-	table->vkCmdCopyMicromapToMemoryEXT = (PFN_vkCmdCopyMicromapToMemoryEXT)load(context, "vkCmdCopyMicromapToMemoryEXT");
-	table->vkCmdWriteMicromapsPropertiesEXT = (PFN_vkCmdWriteMicromapsPropertiesEXT)load(context, "vkCmdWriteMicromapsPropertiesEXT");
-	table->vkCopyMemoryToMicromapEXT = (PFN_vkCopyMemoryToMicromapEXT)load(context, "vkCopyMemoryToMicromapEXT");
-	table->vkCopyMicromapEXT = (PFN_vkCopyMicromapEXT)load(context, "vkCopyMicromapEXT");
-	table->vkCopyMicromapToMemoryEXT = (PFN_vkCopyMicromapToMemoryEXT)load(context, "vkCopyMicromapToMemoryEXT");
-	table->vkCreateMicromapEXT = (PFN_vkCreateMicromapEXT)load(context, "vkCreateMicromapEXT");
-	table->vkDestroyMicromapEXT = (PFN_vkDestroyMicromapEXT)load(context, "vkDestroyMicromapEXT");
-	table->vkGetDeviceMicromapCompatibilityEXT = (PFN_vkGetDeviceMicromapCompatibilityEXT)load(context, "vkGetDeviceMicromapCompatibilityEXT");
-	table->vkGetMicromapBuildSizesEXT = (PFN_vkGetMicromapBuildSizesEXT)load(context, "vkGetMicromapBuildSizesEXT");
-	table->vkWriteMicromapsPropertiesEXT = (PFN_vkWriteMicromapsPropertiesEXT)load(context, "vkWriteMicromapsPropertiesEXT");
-#endif /* defined(VK_EXT_opacity_micromap) */
-#if defined(VK_EXT_pageable_device_local_memory)
-	table->vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT");
-#endif /* defined(VK_EXT_pageable_device_local_memory) */
-#if defined(VK_EXT_pipeline_properties)
-	table->vkGetPipelinePropertiesEXT = (PFN_vkGetPipelinePropertiesEXT)load(context, "vkGetPipelinePropertiesEXT");
-#endif /* defined(VK_EXT_pipeline_properties) */
-#if defined(VK_EXT_private_data)
-	table->vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT");
-	table->vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT");
-	table->vkGetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)load(context, "vkGetPrivateDataEXT");
-	table->vkSetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)load(context, "vkSetPrivateDataEXT");
-#endif /* defined(VK_EXT_private_data) */
-#if defined(VK_EXT_sample_locations)
-	table->vkCmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)load(context, "vkCmdSetSampleLocationsEXT");
-#endif /* defined(VK_EXT_sample_locations) */
-#if defined(VK_EXT_shader_module_identifier)
-	table->vkGetShaderModuleCreateInfoIdentifierEXT = (PFN_vkGetShaderModuleCreateInfoIdentifierEXT)load(context, "vkGetShaderModuleCreateInfoIdentifierEXT");
-	table->vkGetShaderModuleIdentifierEXT = (PFN_vkGetShaderModuleIdentifierEXT)load(context, "vkGetShaderModuleIdentifierEXT");
-#endif /* defined(VK_EXT_shader_module_identifier) */
-#if defined(VK_EXT_shader_object)
-	table->vkCmdBindShadersEXT = (PFN_vkCmdBindShadersEXT)load(context, "vkCmdBindShadersEXT");
-	table->vkCreateShadersEXT = (PFN_vkCreateShadersEXT)load(context, "vkCreateShadersEXT");
-	table->vkDestroyShaderEXT = (PFN_vkDestroyShaderEXT)load(context, "vkDestroyShaderEXT");
-	table->vkGetShaderBinaryDataEXT = (PFN_vkGetShaderBinaryDataEXT)load(context, "vkGetShaderBinaryDataEXT");
-#endif /* defined(VK_EXT_shader_object) */
-#if defined(VK_EXT_swapchain_maintenance1)
-	table->vkReleaseSwapchainImagesEXT = (PFN_vkReleaseSwapchainImagesEXT)load(context, "vkReleaseSwapchainImagesEXT");
-#endif /* defined(VK_EXT_swapchain_maintenance1) */
-#if defined(VK_EXT_transform_feedback)
-	table->vkCmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)load(context, "vkCmdBeginQueryIndexedEXT");
-	table->vkCmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)load(context, "vkCmdBeginTransformFeedbackEXT");
-	table->vkCmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)load(context, "vkCmdBindTransformFeedbackBuffersEXT");
-	table->vkCmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)load(context, "vkCmdDrawIndirectByteCountEXT");
-	table->vkCmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)load(context, "vkCmdEndQueryIndexedEXT");
-	table->vkCmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)load(context, "vkCmdEndTransformFeedbackEXT");
-#endif /* defined(VK_EXT_transform_feedback) */
-#if defined(VK_EXT_validation_cache)
-	table->vkCreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)load(context, "vkCreateValidationCacheEXT");
-	table->vkDestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)load(context, "vkDestroyValidationCacheEXT");
-	table->vkGetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)load(context, "vkGetValidationCacheDataEXT");
-	table->vkMergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)load(context, "vkMergeValidationCachesEXT");
-#endif /* defined(VK_EXT_validation_cache) */
-#if defined(VK_FUCHSIA_buffer_collection)
-	table->vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA");
-	table->vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA");
-	table->vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA");
-	table->vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA");
-	table->vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, "vkSetBufferCollectionImageConstraintsFUCHSIA");
-#endif /* defined(VK_FUCHSIA_buffer_collection) */
-#if defined(VK_FUCHSIA_external_memory)
-	table->vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, "vkGetMemoryZirconHandleFUCHSIA");
-	table->vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA");
-#endif /* defined(VK_FUCHSIA_external_memory) */
-#if defined(VK_FUCHSIA_external_semaphore)
-	table->vkGetSemaphoreZirconHandleFUCHSIA = (PFN_vkGetSemaphoreZirconHandleFUCHSIA)load(context, "vkGetSemaphoreZirconHandleFUCHSIA");
-	table->vkImportSemaphoreZirconHandleFUCHSIA = (PFN_vkImportSemaphoreZirconHandleFUCHSIA)load(context, "vkImportSemaphoreZirconHandleFUCHSIA");
-#endif /* defined(VK_FUCHSIA_external_semaphore) */
-#if defined(VK_GOOGLE_display_timing)
-	table->vkGetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)load(context, "vkGetPastPresentationTimingGOOGLE");
-	table->vkGetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)load(context, "vkGetRefreshCycleDurationGOOGLE");
-#endif /* defined(VK_GOOGLE_display_timing) */
-#if defined(VK_HUAWEI_cluster_culling_shader)
-	table->vkCmdDrawClusterHUAWEI = (PFN_vkCmdDrawClusterHUAWEI)load(context, "vkCmdDrawClusterHUAWEI");
-	table->vkCmdDrawClusterIndirectHUAWEI = (PFN_vkCmdDrawClusterIndirectHUAWEI)load(context, "vkCmdDrawClusterIndirectHUAWEI");
-#endif /* defined(VK_HUAWEI_cluster_culling_shader) */
-#if defined(VK_HUAWEI_invocation_mask)
-	table->vkCmdBindInvocationMaskHUAWEI = (PFN_vkCmdBindInvocationMaskHUAWEI)load(context, "vkCmdBindInvocationMaskHUAWEI");
-#endif /* defined(VK_HUAWEI_invocation_mask) */
-#if defined(VK_HUAWEI_subpass_shading)
-	table->vkCmdSubpassShadingHUAWEI = (PFN_vkCmdSubpassShadingHUAWEI)load(context, "vkCmdSubpassShadingHUAWEI");
-	table->vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = (PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)load(context, "vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI");
-#endif /* defined(VK_HUAWEI_subpass_shading) */
-#if defined(VK_INTEL_performance_query)
-	table->vkAcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)load(context, "vkAcquirePerformanceConfigurationINTEL");
-	table->vkCmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)load(context, "vkCmdSetPerformanceMarkerINTEL");
-	table->vkCmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)load(context, "vkCmdSetPerformanceOverrideINTEL");
-	table->vkCmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)load(context, "vkCmdSetPerformanceStreamMarkerINTEL");
-	table->vkGetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)load(context, "vkGetPerformanceParameterINTEL");
-	table->vkInitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)load(context, "vkInitializePerformanceApiINTEL");
-	table->vkQueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)load(context, "vkQueueSetPerformanceConfigurationINTEL");
-	table->vkReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)load(context, "vkReleasePerformanceConfigurationINTEL");
-	table->vkUninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)load(context, "vkUninitializePerformanceApiINTEL");
-#endif /* defined(VK_INTEL_performance_query) */
-#if defined(VK_KHR_acceleration_structure)
-	table->vkBuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)load(context, "vkBuildAccelerationStructuresKHR");
-	table->vkCmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)load(context, "vkCmdBuildAccelerationStructuresIndirectKHR");
-	table->vkCmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)load(context, "vkCmdBuildAccelerationStructuresKHR");
-	table->vkCmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)load(context, "vkCmdCopyAccelerationStructureKHR");
-	table->vkCmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)load(context, "vkCmdCopyAccelerationStructureToMemoryKHR");
-	table->vkCmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)load(context, "vkCmdCopyMemoryToAccelerationStructureKHR");
-	table->vkCmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)load(context, "vkCmdWriteAccelerationStructuresPropertiesKHR");
-	table->vkCopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)load(context, "vkCopyAccelerationStructureKHR");
-	table->vkCopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)load(context, "vkCopyAccelerationStructureToMemoryKHR");
-	table->vkCopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)load(context, "vkCopyMemoryToAccelerationStructureKHR");
-	table->vkCreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)load(context, "vkCreateAccelerationStructureKHR");
-	table->vkDestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)load(context, "vkDestroyAccelerationStructureKHR");
-	table->vkGetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)load(context, "vkGetAccelerationStructureBuildSizesKHR");
-	table->vkGetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)load(context, "vkGetAccelerationStructureDeviceAddressKHR");
-	table->vkGetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)load(context, "vkGetDeviceAccelerationStructureCompatibilityKHR");
-	table->vkWriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)load(context, "vkWriteAccelerationStructuresPropertiesKHR");
-#endif /* defined(VK_KHR_acceleration_structure) */
-#if defined(VK_KHR_bind_memory2)
-	table->vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)load(context, "vkBindBufferMemory2KHR");
-	table->vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)load(context, "vkBindImageMemory2KHR");
-#endif /* defined(VK_KHR_bind_memory2) */
-#if defined(VK_KHR_buffer_device_address)
-	table->vkGetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)load(context, "vkGetBufferDeviceAddressKHR");
-	table->vkGetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)load(context, "vkGetBufferOpaqueCaptureAddressKHR");
-	table->vkGetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)load(context, "vkGetDeviceMemoryOpaqueCaptureAddressKHR");
-#endif /* defined(VK_KHR_buffer_device_address) */
-#if defined(VK_KHR_copy_commands2)
-	table->vkCmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)load(context, "vkCmdBlitImage2KHR");
-	table->vkCmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)load(context, "vkCmdCopyBuffer2KHR");
-	table->vkCmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)load(context, "vkCmdCopyBufferToImage2KHR");
-	table->vkCmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)load(context, "vkCmdCopyImage2KHR");
-	table->vkCmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)load(context, "vkCmdCopyImageToBuffer2KHR");
-	table->vkCmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)load(context, "vkCmdResolveImage2KHR");
-#endif /* defined(VK_KHR_copy_commands2) */
-#if defined(VK_KHR_create_renderpass2)
-	table->vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)load(context, "vkCmdBeginRenderPass2KHR");
-	table->vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)load(context, "vkCmdEndRenderPass2KHR");
-	table->vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)load(context, "vkCmdNextSubpass2KHR");
-	table->vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)load(context, "vkCreateRenderPass2KHR");
-#endif /* defined(VK_KHR_create_renderpass2) */
-#if defined(VK_KHR_deferred_host_operations)
-	table->vkCreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)load(context, "vkCreateDeferredOperationKHR");
-	table->vkDeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)load(context, "vkDeferredOperationJoinKHR");
-	table->vkDestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)load(context, "vkDestroyDeferredOperationKHR");
-	table->vkGetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)load(context, "vkGetDeferredOperationMaxConcurrencyKHR");
-	table->vkGetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)load(context, "vkGetDeferredOperationResultKHR");
-#endif /* defined(VK_KHR_deferred_host_operations) */
-#if defined(VK_KHR_descriptor_update_template)
-	table->vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)load(context, "vkCreateDescriptorUpdateTemplateKHR");
-	table->vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)load(context, "vkDestroyDescriptorUpdateTemplateKHR");
-	table->vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)load(context, "vkUpdateDescriptorSetWithTemplateKHR");
-#endif /* defined(VK_KHR_descriptor_update_template) */
-#if defined(VK_KHR_device_group)
-	table->vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)load(context, "vkCmdDispatchBaseKHR");
-	table->vkCmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)load(context, "vkCmdSetDeviceMaskKHR");
-	table->vkGetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)load(context, "vkGetDeviceGroupPeerMemoryFeaturesKHR");
-#endif /* defined(VK_KHR_device_group) */
-#if defined(VK_KHR_display_swapchain)
-	table->vkCreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)load(context, "vkCreateSharedSwapchainsKHR");
-#endif /* defined(VK_KHR_display_swapchain) */
-#if defined(VK_KHR_draw_indirect_count)
-	table->vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR");
-	table->vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR");
-#endif /* defined(VK_KHR_draw_indirect_count) */
-#if defined(VK_KHR_dynamic_rendering)
-	table->vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR");
-	table->vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR");
-#endif /* defined(VK_KHR_dynamic_rendering) */
-#if defined(VK_KHR_external_fence_fd)
-	table->vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR");
-	table->vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR");
-#endif /* defined(VK_KHR_external_fence_fd) */
-#if defined(VK_KHR_external_fence_win32)
-	table->vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)load(context, "vkGetFenceWin32HandleKHR");
-	table->vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)load(context, "vkImportFenceWin32HandleKHR");
-#endif /* defined(VK_KHR_external_fence_win32) */
-#if defined(VK_KHR_external_memory_fd)
-	table->vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)load(context, "vkGetMemoryFdKHR");
-	table->vkGetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)load(context, "vkGetMemoryFdPropertiesKHR");
-#endif /* defined(VK_KHR_external_memory_fd) */
-#if defined(VK_KHR_external_memory_win32)
-	table->vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)load(context, "vkGetMemoryWin32HandleKHR");
-	table->vkGetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)load(context, "vkGetMemoryWin32HandlePropertiesKHR");
-#endif /* defined(VK_KHR_external_memory_win32) */
-#if defined(VK_KHR_external_semaphore_fd)
-	table->vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)load(context, "vkGetSemaphoreFdKHR");
-	table->vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)load(context, "vkImportSemaphoreFdKHR");
-#endif /* defined(VK_KHR_external_semaphore_fd) */
-#if defined(VK_KHR_external_semaphore_win32)
-	table->vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)load(context, "vkGetSemaphoreWin32HandleKHR");
-	table->vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)load(context, "vkImportSemaphoreWin32HandleKHR");
-#endif /* defined(VK_KHR_external_semaphore_win32) */
-#if defined(VK_KHR_fragment_shading_rate)
-	table->vkCmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)load(context, "vkCmdSetFragmentShadingRateKHR");
-#endif /* defined(VK_KHR_fragment_shading_rate) */
-#if defined(VK_KHR_get_memory_requirements2)
-	table->vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)load(context, "vkGetBufferMemoryRequirements2KHR");
-	table->vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)load(context, "vkGetImageMemoryRequirements2KHR");
-	table->vkGetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)load(context, "vkGetImageSparseMemoryRequirements2KHR");
-#endif /* defined(VK_KHR_get_memory_requirements2) */
-#if defined(VK_KHR_maintenance1)
-	table->vkTrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)load(context, "vkTrimCommandPoolKHR");
-#endif /* defined(VK_KHR_maintenance1) */
-#if defined(VK_KHR_maintenance3)
-	table->vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR");
-#endif /* defined(VK_KHR_maintenance3) */
-#if defined(VK_KHR_maintenance4)
-	table->vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR");
-	table->vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR");
-	table->vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR");
-#endif /* defined(VK_KHR_maintenance4) */
-#if defined(VK_KHR_maintenance5)
-	table->vkCmdBindIndexBuffer2KHR = (PFN_vkCmdBindIndexBuffer2KHR)load(context, "vkCmdBindIndexBuffer2KHR");
-	table->vkGetDeviceImageSubresourceLayoutKHR = (PFN_vkGetDeviceImageSubresourceLayoutKHR)load(context, "vkGetDeviceImageSubresourceLayoutKHR");
-	table->vkGetImageSubresourceLayout2KHR = (PFN_vkGetImageSubresourceLayout2KHR)load(context, "vkGetImageSubresourceLayout2KHR");
-	table->vkGetRenderingAreaGranularityKHR = (PFN_vkGetRenderingAreaGranularityKHR)load(context, "vkGetRenderingAreaGranularityKHR");
-#endif /* defined(VK_KHR_maintenance5) */
-#if defined(VK_KHR_map_memory2)
-	table->vkMapMemory2KHR = (PFN_vkMapMemory2KHR)load(context, "vkMapMemory2KHR");
-	table->vkUnmapMemory2KHR = (PFN_vkUnmapMemory2KHR)load(context, "vkUnmapMemory2KHR");
-#endif /* defined(VK_KHR_map_memory2) */
-#if defined(VK_KHR_performance_query)
-	table->vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR");
-	table->vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR");
-#endif /* defined(VK_KHR_performance_query) */
-#if defined(VK_KHR_pipeline_executable_properties)
-	table->vkGetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)load(context, "vkGetPipelineExecutableInternalRepresentationsKHR");
-	table->vkGetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)load(context, "vkGetPipelineExecutablePropertiesKHR");
-	table->vkGetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)load(context, "vkGetPipelineExecutableStatisticsKHR");
-#endif /* defined(VK_KHR_pipeline_executable_properties) */
-#if defined(VK_KHR_present_wait)
-	table->vkWaitForPresentKHR = (PFN_vkWaitForPresentKHR)load(context, "vkWaitForPresentKHR");
-#endif /* defined(VK_KHR_present_wait) */
-#if defined(VK_KHR_push_descriptor)
-	table->vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)load(context, "vkCmdPushDescriptorSetKHR");
-#endif /* defined(VK_KHR_push_descriptor) */
-#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)
-	table->vkCmdTraceRaysIndirect2KHR = (PFN_vkCmdTraceRaysIndirect2KHR)load(context, "vkCmdTraceRaysIndirect2KHR");
-#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_ray_tracing_pipeline)
-	table->vkCmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)load(context, "vkCmdSetRayTracingPipelineStackSizeKHR");
-	table->vkCmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)load(context, "vkCmdTraceRaysIndirectKHR");
-	table->vkCmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)load(context, "vkCmdTraceRaysKHR");
-	table->vkCreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)load(context, "vkCreateRayTracingPipelinesKHR");
-	table->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)load(context, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
-	table->vkGetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)load(context, "vkGetRayTracingShaderGroupHandlesKHR");
-	table->vkGetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)load(context, "vkGetRayTracingShaderGroupStackSizeKHR");
-#endif /* defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_sampler_ycbcr_conversion)
-	table->vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)load(context, "vkCreateSamplerYcbcrConversionKHR");
-	table->vkDestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)load(context, "vkDestroySamplerYcbcrConversionKHR");
-#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */
-#if defined(VK_KHR_shared_presentable_image)
-	table->vkGetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)load(context, "vkGetSwapchainStatusKHR");
-#endif /* defined(VK_KHR_shared_presentable_image) */
-#if defined(VK_KHR_swapchain)
-	table->vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)load(context, "vkAcquireNextImageKHR");
-	table->vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)load(context, "vkCreateSwapchainKHR");
-	table->vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)load(context, "vkDestroySwapchainKHR");
-	table->vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)load(context, "vkGetSwapchainImagesKHR");
-	table->vkQueuePresentKHR = (PFN_vkQueuePresentKHR)load(context, "vkQueuePresentKHR");
-#endif /* defined(VK_KHR_swapchain) */
-#if defined(VK_KHR_synchronization2)
-	table->vkCmdPipelineBarrier2KHR = (PFN_vkCmdPipelineBarrier2KHR)load(context, "vkCmdPipelineBarrier2KHR");
-	table->vkCmdResetEvent2KHR = (PFN_vkCmdResetEvent2KHR)load(context, "vkCmdResetEvent2KHR");
-	table->vkCmdSetEvent2KHR = (PFN_vkCmdSetEvent2KHR)load(context, "vkCmdSetEvent2KHR");
-	table->vkCmdWaitEvents2KHR = (PFN_vkCmdWaitEvents2KHR)load(context, "vkCmdWaitEvents2KHR");
-	table->vkCmdWriteTimestamp2KHR = (PFN_vkCmdWriteTimestamp2KHR)load(context, "vkCmdWriteTimestamp2KHR");
-	table->vkQueueSubmit2KHR = (PFN_vkQueueSubmit2KHR)load(context, "vkQueueSubmit2KHR");
-#endif /* defined(VK_KHR_synchronization2) */
-#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker)
-	table->vkCmdWriteBufferMarker2AMD = (PFN_vkCmdWriteBufferMarker2AMD)load(context, "vkCmdWriteBufferMarker2AMD");
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */
-#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints)
-	table->vkGetQueueCheckpointData2NV = (PFN_vkGetQueueCheckpointData2NV)load(context, "vkGetQueueCheckpointData2NV");
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_KHR_timeline_semaphore)
-	table->vkGetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)load(context, "vkGetSemaphoreCounterValueKHR");
-	table->vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)load(context, "vkSignalSemaphoreKHR");
-	table->vkWaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)load(context, "vkWaitSemaphoresKHR");
-#endif /* defined(VK_KHR_timeline_semaphore) */
-#if defined(VK_KHR_video_decode_queue)
-	table->vkCmdDecodeVideoKHR = (PFN_vkCmdDecodeVideoKHR)load(context, "vkCmdDecodeVideoKHR");
-#endif /* defined(VK_KHR_video_decode_queue) */
-#if defined(VK_KHR_video_encode_queue)
-	table->vkCmdEncodeVideoKHR = (PFN_vkCmdEncodeVideoKHR)load(context, "vkCmdEncodeVideoKHR");
-	table->vkGetEncodedVideoSessionParametersKHR = (PFN_vkGetEncodedVideoSessionParametersKHR)load(context, "vkGetEncodedVideoSessionParametersKHR");
-#endif /* defined(VK_KHR_video_encode_queue) */
-#if defined(VK_KHR_video_queue)
-	table->vkBindVideoSessionMemoryKHR = (PFN_vkBindVideoSessionMemoryKHR)load(context, "vkBindVideoSessionMemoryKHR");
-	table->vkCmdBeginVideoCodingKHR = (PFN_vkCmdBeginVideoCodingKHR)load(context, "vkCmdBeginVideoCodingKHR");
-	table->vkCmdControlVideoCodingKHR = (PFN_vkCmdControlVideoCodingKHR)load(context, "vkCmdControlVideoCodingKHR");
-	table->vkCmdEndVideoCodingKHR = (PFN_vkCmdEndVideoCodingKHR)load(context, "vkCmdEndVideoCodingKHR");
-	table->vkCreateVideoSessionKHR = (PFN_vkCreateVideoSessionKHR)load(context, "vkCreateVideoSessionKHR");
-	table->vkCreateVideoSessionParametersKHR = (PFN_vkCreateVideoSessionParametersKHR)load(context, "vkCreateVideoSessionParametersKHR");
-	table->vkDestroyVideoSessionKHR = (PFN_vkDestroyVideoSessionKHR)load(context, "vkDestroyVideoSessionKHR");
-	table->vkDestroyVideoSessionParametersKHR = (PFN_vkDestroyVideoSessionParametersKHR)load(context, "vkDestroyVideoSessionParametersKHR");
-	table->vkGetVideoSessionMemoryRequirementsKHR = (PFN_vkGetVideoSessionMemoryRequirementsKHR)load(context, "vkGetVideoSessionMemoryRequirementsKHR");
-	table->vkUpdateVideoSessionParametersKHR = (PFN_vkUpdateVideoSessionParametersKHR)load(context, "vkUpdateVideoSessionParametersKHR");
-#endif /* defined(VK_KHR_video_queue) */
-#if defined(VK_NVX_binary_import)
-	table->vkCmdCuLaunchKernelNVX = (PFN_vkCmdCuLaunchKernelNVX)load(context, "vkCmdCuLaunchKernelNVX");
-	table->vkCreateCuFunctionNVX = (PFN_vkCreateCuFunctionNVX)load(context, "vkCreateCuFunctionNVX");
-	table->vkCreateCuModuleNVX = (PFN_vkCreateCuModuleNVX)load(context, "vkCreateCuModuleNVX");
-	table->vkDestroyCuFunctionNVX = (PFN_vkDestroyCuFunctionNVX)load(context, "vkDestroyCuFunctionNVX");
-	table->vkDestroyCuModuleNVX = (PFN_vkDestroyCuModuleNVX)load(context, "vkDestroyCuModuleNVX");
-#endif /* defined(VK_NVX_binary_import) */
-#if defined(VK_NVX_image_view_handle)
-	table->vkGetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)load(context, "vkGetImageViewAddressNVX");
-	table->vkGetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)load(context, "vkGetImageViewHandleNVX");
-#endif /* defined(VK_NVX_image_view_handle) */
-#if defined(VK_NV_clip_space_w_scaling)
-	table->vkCmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)load(context, "vkCmdSetViewportWScalingNV");
-#endif /* defined(VK_NV_clip_space_w_scaling) */
-#if defined(VK_NV_copy_memory_indirect)
-	table->vkCmdCopyMemoryIndirectNV = (PFN_vkCmdCopyMemoryIndirectNV)load(context, "vkCmdCopyMemoryIndirectNV");
-	table->vkCmdCopyMemoryToImageIndirectNV = (PFN_vkCmdCopyMemoryToImageIndirectNV)load(context, "vkCmdCopyMemoryToImageIndirectNV");
-#endif /* defined(VK_NV_copy_memory_indirect) */
-#if defined(VK_NV_cuda_kernel_launch)
-	table->vkCmdCudaLaunchKernelNV = (PFN_vkCmdCudaLaunchKernelNV)load(context, "vkCmdCudaLaunchKernelNV");
-	table->vkCreateCudaFunctionNV = (PFN_vkCreateCudaFunctionNV)load(context, "vkCreateCudaFunctionNV");
-	table->vkCreateCudaModuleNV = (PFN_vkCreateCudaModuleNV)load(context, "vkCreateCudaModuleNV");
-	table->vkDestroyCudaFunctionNV = (PFN_vkDestroyCudaFunctionNV)load(context, "vkDestroyCudaFunctionNV");
-	table->vkDestroyCudaModuleNV = (PFN_vkDestroyCudaModuleNV)load(context, "vkDestroyCudaModuleNV");
-	table->vkGetCudaModuleCacheNV = (PFN_vkGetCudaModuleCacheNV)load(context, "vkGetCudaModuleCacheNV");
-#endif /* defined(VK_NV_cuda_kernel_launch) */
-#if defined(VK_NV_device_diagnostic_checkpoints)
-	table->vkCmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)load(context, "vkCmdSetCheckpointNV");
-	table->vkGetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)load(context, "vkGetQueueCheckpointDataNV");
-#endif /* defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_NV_device_generated_commands)
-	table->vkCmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)load(context, "vkCmdBindPipelineShaderGroupNV");
-	table->vkCmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)load(context, "vkCmdExecuteGeneratedCommandsNV");
-	table->vkCmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)load(context, "vkCmdPreprocessGeneratedCommandsNV");
-	table->vkCreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)load(context, "vkCreateIndirectCommandsLayoutNV");
-	table->vkDestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)load(context, "vkDestroyIndirectCommandsLayoutNV");
-	table->vkGetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)load(context, "vkGetGeneratedCommandsMemoryRequirementsNV");
-#endif /* defined(VK_NV_device_generated_commands) */
-#if defined(VK_NV_device_generated_commands_compute)
-	table->vkCmdUpdatePipelineIndirectBufferNV = (PFN_vkCmdUpdatePipelineIndirectBufferNV)load(context, "vkCmdUpdatePipelineIndirectBufferNV");
-	table->vkGetPipelineIndirectDeviceAddressNV = (PFN_vkGetPipelineIndirectDeviceAddressNV)load(context, "vkGetPipelineIndirectDeviceAddressNV");
-	table->vkGetPipelineIndirectMemoryRequirementsNV = (PFN_vkGetPipelineIndirectMemoryRequirementsNV)load(context, "vkGetPipelineIndirectMemoryRequirementsNV");
-#endif /* defined(VK_NV_device_generated_commands_compute) */
-#if defined(VK_NV_external_memory_rdma)
-	table->vkGetMemoryRemoteAddressNV = (PFN_vkGetMemoryRemoteAddressNV)load(context, "vkGetMemoryRemoteAddressNV");
-#endif /* defined(VK_NV_external_memory_rdma) */
-#if defined(VK_NV_external_memory_win32)
-	table->vkGetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)load(context, "vkGetMemoryWin32HandleNV");
-#endif /* defined(VK_NV_external_memory_win32) */
-#if defined(VK_NV_fragment_shading_rate_enums)
-	table->vkCmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)load(context, "vkCmdSetFragmentShadingRateEnumNV");
-#endif /* defined(VK_NV_fragment_shading_rate_enums) */
-#if defined(VK_NV_low_latency2)
-	table->vkGetLatencyTimingsNV = (PFN_vkGetLatencyTimingsNV)load(context, "vkGetLatencyTimingsNV");
-	table->vkLatencySleepNV = (PFN_vkLatencySleepNV)load(context, "vkLatencySleepNV");
-	table->vkQueueNotifyOutOfBandNV = (PFN_vkQueueNotifyOutOfBandNV)load(context, "vkQueueNotifyOutOfBandNV");
-	table->vkSetLatencyMarkerNV = (PFN_vkSetLatencyMarkerNV)load(context, "vkSetLatencyMarkerNV");
-	table->vkSetLatencySleepModeNV = (PFN_vkSetLatencySleepModeNV)load(context, "vkSetLatencySleepModeNV");
-#endif /* defined(VK_NV_low_latency2) */
-#if defined(VK_NV_memory_decompression)
-	table->vkCmdDecompressMemoryIndirectCountNV = (PFN_vkCmdDecompressMemoryIndirectCountNV)load(context, "vkCmdDecompressMemoryIndirectCountNV");
-	table->vkCmdDecompressMemoryNV = (PFN_vkCmdDecompressMemoryNV)load(context, "vkCmdDecompressMemoryNV");
-#endif /* defined(VK_NV_memory_decompression) */
-#if defined(VK_NV_mesh_shader)
-	table->vkCmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV)load(context, "vkCmdDrawMeshTasksIndirectCountNV");
-	table->vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)load(context, "vkCmdDrawMeshTasksIndirectNV");
-	table->vkCmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)load(context, "vkCmdDrawMeshTasksNV");
-#endif /* defined(VK_NV_mesh_shader) */
-#if defined(VK_NV_optical_flow)
-	table->vkBindOpticalFlowSessionImageNV = (PFN_vkBindOpticalFlowSessionImageNV)load(context, "vkBindOpticalFlowSessionImageNV");
-	table->vkCmdOpticalFlowExecuteNV = (PFN_vkCmdOpticalFlowExecuteNV)load(context, "vkCmdOpticalFlowExecuteNV");
-	table->vkCreateOpticalFlowSessionNV = (PFN_vkCreateOpticalFlowSessionNV)load(context, "vkCreateOpticalFlowSessionNV");
-	table->vkDestroyOpticalFlowSessionNV = (PFN_vkDestroyOpticalFlowSessionNV)load(context, "vkDestroyOpticalFlowSessionNV");
-#endif /* defined(VK_NV_optical_flow) */
-#if defined(VK_NV_ray_tracing)
-	table->vkBindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)load(context, "vkBindAccelerationStructureMemoryNV");
-	table->vkCmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)load(context, "vkCmdBuildAccelerationStructureNV");
-	table->vkCmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)load(context, "vkCmdCopyAccelerationStructureNV");
-	table->vkCmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)load(context, "vkCmdTraceRaysNV");
-	table->vkCmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)load(context, "vkCmdWriteAccelerationStructuresPropertiesNV");
-	table->vkCompileDeferredNV = (PFN_vkCompileDeferredNV)load(context, "vkCompileDeferredNV");
-	table->vkCreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)load(context, "vkCreateAccelerationStructureNV");
-	table->vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)load(context, "vkCreateRayTracingPipelinesNV");
-	table->vkDestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)load(context, "vkDestroyAccelerationStructureNV");
-	table->vkGetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)load(context, "vkGetAccelerationStructureHandleNV");
-	table->vkGetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)load(context, "vkGetAccelerationStructureMemoryRequirementsNV");
-	table->vkGetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)load(context, "vkGetRayTracingShaderGroupHandlesNV");
-#endif /* defined(VK_NV_ray_tracing) */
-#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2
-	table->vkCmdSetExclusiveScissorEnableNV = (PFN_vkCmdSetExclusiveScissorEnableNV)load(context, "vkCmdSetExclusiveScissorEnableNV");
-#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */
-#if defined(VK_NV_scissor_exclusive)
-	table->vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)load(context, "vkCmdSetExclusiveScissorNV");
-#endif /* defined(VK_NV_scissor_exclusive) */
-#if defined(VK_NV_shading_rate_image)
-	table->vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)load(context, "vkCmdBindShadingRateImageNV");
-	table->vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)load(context, "vkCmdSetCoarseSampleOrderNV");
-	table->vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)load(context, "vkCmdSetViewportShadingRatePaletteNV");
-#endif /* defined(VK_NV_shading_rate_image) */
-#if defined(VK_QCOM_tile_properties)
-	table->vkGetDynamicRenderingTilePropertiesQCOM = (PFN_vkGetDynamicRenderingTilePropertiesQCOM)load(context, "vkGetDynamicRenderingTilePropertiesQCOM");
-	table->vkGetFramebufferTilePropertiesQCOM = (PFN_vkGetFramebufferTilePropertiesQCOM)load(context, "vkGetFramebufferTilePropertiesQCOM");
-#endif /* defined(VK_QCOM_tile_properties) */
-#if defined(VK_QNX_external_memory_screen_buffer)
-	table->vkGetScreenBufferPropertiesQNX = (PFN_vkGetScreenBufferPropertiesQNX)load(context, "vkGetScreenBufferPropertiesQNX");
-#endif /* defined(VK_QNX_external_memory_screen_buffer) */
-#if defined(VK_VALVE_descriptor_set_host_mapping)
-	table->vkGetDescriptorSetHostMappingVALVE = (PFN_vkGetDescriptorSetHostMappingVALVE)load(context, "vkGetDescriptorSetHostMappingVALVE");
-	table->vkGetDescriptorSetLayoutHostMappingInfoVALVE = (PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)load(context, "vkGetDescriptorSetLayoutHostMappingInfoVALVE");
-#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */
-#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))
-	table->vkCmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)load(context, "vkCmdBindVertexBuffers2EXT");
-	table->vkCmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)load(context, "vkCmdSetCullModeEXT");
-	table->vkCmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)load(context, "vkCmdSetDepthBoundsTestEnableEXT");
-	table->vkCmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)load(context, "vkCmdSetDepthCompareOpEXT");
-	table->vkCmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)load(context, "vkCmdSetDepthTestEnableEXT");
-	table->vkCmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT)load(context, "vkCmdSetDepthWriteEnableEXT");
-	table->vkCmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)load(context, "vkCmdSetFrontFaceEXT");
-	table->vkCmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)load(context, "vkCmdSetPrimitiveTopologyEXT");
-	table->vkCmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT)load(context, "vkCmdSetScissorWithCountEXT");
-	table->vkCmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)load(context, "vkCmdSetStencilOpEXT");
-	table->vkCmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)load(context, "vkCmdSetStencilTestEnableEXT");
-	table->vkCmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)load(context, "vkCmdSetViewportWithCountEXT");
-#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))
-	table->vkCmdSetDepthBiasEnableEXT = (PFN_vkCmdSetDepthBiasEnableEXT)load(context, "vkCmdSetDepthBiasEnableEXT");
-	table->vkCmdSetLogicOpEXT = (PFN_vkCmdSetLogicOpEXT)load(context, "vkCmdSetLogicOpEXT");
-	table->vkCmdSetPatchControlPointsEXT = (PFN_vkCmdSetPatchControlPointsEXT)load(context, "vkCmdSetPatchControlPointsEXT");
-	table->vkCmdSetPrimitiveRestartEnableEXT = (PFN_vkCmdSetPrimitiveRestartEnableEXT)load(context, "vkCmdSetPrimitiveRestartEnableEXT");
-	table->vkCmdSetRasterizerDiscardEnableEXT = (PFN_vkCmdSetRasterizerDiscardEnableEXT)load(context, "vkCmdSetRasterizerDiscardEnableEXT");
-#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))
-	table->vkCmdSetAlphaToCoverageEnableEXT = (PFN_vkCmdSetAlphaToCoverageEnableEXT)load(context, "vkCmdSetAlphaToCoverageEnableEXT");
-	table->vkCmdSetAlphaToOneEnableEXT = (PFN_vkCmdSetAlphaToOneEnableEXT)load(context, "vkCmdSetAlphaToOneEnableEXT");
-	table->vkCmdSetColorBlendAdvancedEXT = (PFN_vkCmdSetColorBlendAdvancedEXT)load(context, "vkCmdSetColorBlendAdvancedEXT");
-	table->vkCmdSetColorBlendEnableEXT = (PFN_vkCmdSetColorBlendEnableEXT)load(context, "vkCmdSetColorBlendEnableEXT");
-	table->vkCmdSetColorBlendEquationEXT = (PFN_vkCmdSetColorBlendEquationEXT)load(context, "vkCmdSetColorBlendEquationEXT");
-	table->vkCmdSetColorWriteMaskEXT = (PFN_vkCmdSetColorWriteMaskEXT)load(context, "vkCmdSetColorWriteMaskEXT");
-	table->vkCmdSetConservativeRasterizationModeEXT = (PFN_vkCmdSetConservativeRasterizationModeEXT)load(context, "vkCmdSetConservativeRasterizationModeEXT");
-	table->vkCmdSetDepthClampEnableEXT = (PFN_vkCmdSetDepthClampEnableEXT)load(context, "vkCmdSetDepthClampEnableEXT");
-	table->vkCmdSetDepthClipEnableEXT = (PFN_vkCmdSetDepthClipEnableEXT)load(context, "vkCmdSetDepthClipEnableEXT");
-	table->vkCmdSetDepthClipNegativeOneToOneEXT = (PFN_vkCmdSetDepthClipNegativeOneToOneEXT)load(context, "vkCmdSetDepthClipNegativeOneToOneEXT");
-	table->vkCmdSetExtraPrimitiveOverestimationSizeEXT = (PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT)load(context, "vkCmdSetExtraPrimitiveOverestimationSizeEXT");
-	table->vkCmdSetLineRasterizationModeEXT = (PFN_vkCmdSetLineRasterizationModeEXT)load(context, "vkCmdSetLineRasterizationModeEXT");
-	table->vkCmdSetLineStippleEnableEXT = (PFN_vkCmdSetLineStippleEnableEXT)load(context, "vkCmdSetLineStippleEnableEXT");
-	table->vkCmdSetLogicOpEnableEXT = (PFN_vkCmdSetLogicOpEnableEXT)load(context, "vkCmdSetLogicOpEnableEXT");
-	table->vkCmdSetPolygonModeEXT = (PFN_vkCmdSetPolygonModeEXT)load(context, "vkCmdSetPolygonModeEXT");
-	table->vkCmdSetProvokingVertexModeEXT = (PFN_vkCmdSetProvokingVertexModeEXT)load(context, "vkCmdSetProvokingVertexModeEXT");
-	table->vkCmdSetRasterizationSamplesEXT = (PFN_vkCmdSetRasterizationSamplesEXT)load(context, "vkCmdSetRasterizationSamplesEXT");
-	table->vkCmdSetRasterizationStreamEXT = (PFN_vkCmdSetRasterizationStreamEXT)load(context, "vkCmdSetRasterizationStreamEXT");
-	table->vkCmdSetSampleLocationsEnableEXT = (PFN_vkCmdSetSampleLocationsEnableEXT)load(context, "vkCmdSetSampleLocationsEnableEXT");
-	table->vkCmdSetSampleMaskEXT = (PFN_vkCmdSetSampleMaskEXT)load(context, "vkCmdSetSampleMaskEXT");
-	table->vkCmdSetTessellationDomainOriginEXT = (PFN_vkCmdSetTessellationDomainOriginEXT)load(context, "vkCmdSetTessellationDomainOriginEXT");
-#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))
-	table->vkCmdSetViewportWScalingEnableNV = (PFN_vkCmdSetViewportWScalingEnableNV)load(context, "vkCmdSetViewportWScalingEnableNV");
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))
*/ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) - table->vkCmdSetViewportSwizzleNV = (PFN_vkCmdSetViewportSwizzleNV)load(context, "vkCmdSetViewportSwizzleNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) - table->vkCmdSetCoverageToColorEnableNV = (PFN_vkCmdSetCoverageToColorEnableNV)load(context, "vkCmdSetCoverageToColorEnableNV"); - table->vkCmdSetCoverageToColorLocationNV = (PFN_vkCmdSetCoverageToColorLocationNV)load(context, "vkCmdSetCoverageToColorLocationNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) - table->vkCmdSetCoverageModulationModeNV = (PFN_vkCmdSetCoverageModulationModeNV)load(context, "vkCmdSetCoverageModulationModeNV"); - table->vkCmdSetCoverageModulationTableEnableNV = (PFN_vkCmdSetCoverageModulationTableEnableNV)load(context, "vkCmdSetCoverageModulationTableEnableNV"); - table->vkCmdSetCoverageModulationTableNV = (PFN_vkCmdSetCoverageModulationTableNV)load(context, "vkCmdSetCoverageModulationTableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) - table->vkCmdSetShadingRateImageEnableNV = (PFN_vkCmdSetShadingRateImageEnableNV)load(context, "vkCmdSetShadingRateImageEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) - table->vkCmdSetRepresentativeFragmentTestEnableNV = (PFN_vkCmdSetRepresentativeFragmentTestEnableNV)load(context, "vkCmdSetRepresentativeFragmentTestEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) - table->vkCmdSetCoverageReductionModeNV = (PFN_vkCmdSetCoverageReductionModeNV)load(context, "vkCmdSetCoverageReductionModeNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) - table->vkGetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)load(context, 
"vkGetDeviceGroupSurfacePresentModes2EXT"); -#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) - table->vkGetImageSubresourceLayout2EXT = (PFN_vkGetImageSubresourceLayout2EXT)load(context, "vkGetImageSubresourceLayout2EXT"); -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) - table->vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT"); -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) - table->vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)load(context, "vkCmdPushDescriptorSetWithTemplateKHR"); -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - table->vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)load(context, "vkGetDeviceGroupPresentCapabilitiesKHR"); - table->vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)load(context, "vkGetDeviceGroupSurfacePresentModesKHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - table->vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)load(context, "vkAcquireNextImage2KHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ - /* VOLK_GENERATE_LOAD_DEVICE_TABLE */ -} - -#ifdef __GNUC__ -#ifdef VOLK_DEFAULT_VISIBILITY -# pragma GCC visibility push(default) -#else -# pragma GCC visibility push(hidden) -#endif -#endif - -/* VOLK_GENERATE_PROTOTYPES_C */ -#if defined(VK_VERSION_1_0) -PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; -PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; -PFN_vkAllocateMemory vkAllocateMemory; -PFN_vkBeginCommandBuffer vkBeginCommandBuffer; -PFN_vkBindBufferMemory vkBindBufferMemory; -PFN_vkBindImageMemory vkBindImageMemory; -PFN_vkCmdBeginQuery vkCmdBeginQuery; -PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; -PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; -PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; -PFN_vkCmdBindPipeline vkCmdBindPipeline; -PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; -PFN_vkCmdBlitImage vkCmdBlitImage; -PFN_vkCmdClearAttachments vkCmdClearAttachments; -PFN_vkCmdClearColorImage vkCmdClearColorImage; -PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; -PFN_vkCmdCopyBuffer vkCmdCopyBuffer; -PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; -PFN_vkCmdCopyImage vkCmdCopyImage; -PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; 
-PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;
-PFN_vkCmdDispatch vkCmdDispatch;
-PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;
-PFN_vkCmdDraw vkCmdDraw;
-PFN_vkCmdDrawIndexed vkCmdDrawIndexed;
-PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;
-PFN_vkCmdDrawIndirect vkCmdDrawIndirect;
-PFN_vkCmdEndQuery vkCmdEndQuery;
-PFN_vkCmdEndRenderPass vkCmdEndRenderPass;
-PFN_vkCmdExecuteCommands vkCmdExecuteCommands;
-PFN_vkCmdFillBuffer vkCmdFillBuffer;
-PFN_vkCmdNextSubpass vkCmdNextSubpass;
-PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
-PFN_vkCmdPushConstants vkCmdPushConstants;
-PFN_vkCmdResetEvent vkCmdResetEvent;
-PFN_vkCmdResetQueryPool vkCmdResetQueryPool;
-PFN_vkCmdResolveImage vkCmdResolveImage;
-PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
-PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
-PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
-PFN_vkCmdSetEvent vkCmdSetEvent;
-PFN_vkCmdSetLineWidth vkCmdSetLineWidth;
-PFN_vkCmdSetScissor vkCmdSetScissor;
-PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
-PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
-PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
-PFN_vkCmdSetViewport vkCmdSetViewport;
-PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;
-PFN_vkCmdWaitEvents vkCmdWaitEvents;
-PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;
-PFN_vkCreateBuffer vkCreateBuffer;
-PFN_vkCreateBufferView vkCreateBufferView;
-PFN_vkCreateCommandPool vkCreateCommandPool;
-PFN_vkCreateComputePipelines vkCreateComputePipelines;
-PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
-PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
-PFN_vkCreateDevice vkCreateDevice;
-PFN_vkCreateEvent vkCreateEvent;
-PFN_vkCreateFence vkCreateFence;
-PFN_vkCreateFramebuffer vkCreateFramebuffer;
-PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
-PFN_vkCreateImage vkCreateImage;
-PFN_vkCreateImageView vkCreateImageView;
-PFN_vkCreateInstance vkCreateInstance;
-PFN_vkCreatePipelineCache vkCreatePipelineCache;
-PFN_vkCreatePipelineLayout vkCreatePipelineLayout;
-PFN_vkCreateQueryPool vkCreateQueryPool;
-PFN_vkCreateRenderPass vkCreateRenderPass;
-PFN_vkCreateSampler vkCreateSampler;
-PFN_vkCreateSemaphore vkCreateSemaphore;
-PFN_vkCreateShaderModule vkCreateShaderModule;
-PFN_vkDestroyBuffer vkDestroyBuffer;
-PFN_vkDestroyBufferView vkDestroyBufferView;
-PFN_vkDestroyCommandPool vkDestroyCommandPool;
-PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
-PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
-PFN_vkDestroyDevice vkDestroyDevice;
-PFN_vkDestroyEvent vkDestroyEvent;
-PFN_vkDestroyFence vkDestroyFence;
-PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
-PFN_vkDestroyImage vkDestroyImage;
-PFN_vkDestroyImageView vkDestroyImageView;
-PFN_vkDestroyInstance vkDestroyInstance;
-PFN_vkDestroyPipeline vkDestroyPipeline;
-PFN_vkDestroyPipelineCache vkDestroyPipelineCache;
-PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;
-PFN_vkDestroyQueryPool vkDestroyQueryPool;
-PFN_vkDestroyRenderPass vkDestroyRenderPass;
-PFN_vkDestroySampler vkDestroySampler;
-PFN_vkDestroySemaphore vkDestroySemaphore;
-PFN_vkDestroyShaderModule vkDestroyShaderModule;
-PFN_vkDeviceWaitIdle vkDeviceWaitIdle;
-PFN_vkEndCommandBuffer vkEndCommandBuffer;
-PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;
-PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;
-PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;
-PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;
-PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
-PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
-PFN_vkFreeCommandBuffers vkFreeCommandBuffers;
-PFN_vkFreeDescriptorSets vkFreeDescriptorSets;
-PFN_vkFreeMemory vkFreeMemory;
-PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
-PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;
-PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
-PFN_vkGetDeviceQueue vkGetDeviceQueue;
-PFN_vkGetEventStatus vkGetEventStatus;
-PFN_vkGetFenceStatus vkGetFenceStatus;
-PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
-PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;
-PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;
-PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
-PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;
-PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;
-PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;
-PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
-PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
-PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;
-PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;
-PFN_vkGetPipelineCacheData vkGetPipelineCacheData;
-PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
-PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;
-PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
-PFN_vkMapMemory vkMapMemory;
-PFN_vkMergePipelineCaches vkMergePipelineCaches;
-PFN_vkQueueBindSparse vkQueueBindSparse;
-PFN_vkQueueSubmit vkQueueSubmit;
-PFN_vkQueueWaitIdle vkQueueWaitIdle;
-PFN_vkResetCommandBuffer vkResetCommandBuffer;
-PFN_vkResetCommandPool vkResetCommandPool;
-PFN_vkResetDescriptorPool vkResetDescriptorPool;
-PFN_vkResetEvent vkResetEvent;
-PFN_vkResetFences vkResetFences;
-PFN_vkSetEvent vkSetEvent;
-PFN_vkUnmapMemory vkUnmapMemory;
-PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
-PFN_vkWaitForFences vkWaitForFences;
-#endif /* defined(VK_VERSION_1_0) */
-#if defined(VK_VERSION_1_1)
-PFN_vkBindBufferMemory2 vkBindBufferMemory2;
-PFN_vkBindImageMemory2 vkBindImageMemory2;
-PFN_vkCmdDispatchBase vkCmdDispatchBase;
-PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;
-PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;
-PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;
-PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;
-PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;
-PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;
-PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;
-PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
-PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;
-PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;
-PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;
-PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
-PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;
-PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;
-PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;
-PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;
-PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;
-PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;
-PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2;
-PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
-PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;
-PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;
-PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;
-PFN_vkTrimCommandPool vkTrimCommandPool;
-PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;
-#endif /* defined(VK_VERSION_1_1) */
-#if defined(VK_VERSION_1_2)
-PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;
-PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;
-PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;
-PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;
-PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;
-PFN_vkCreateRenderPass2 vkCreateRenderPass2;
-PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;
-PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;
-PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;
-PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;
-PFN_vkResetQueryPool vkResetQueryPool;
-PFN_vkSignalSemaphore vkSignalSemaphore;
-PFN_vkWaitSemaphores vkWaitSemaphores;
-#endif /* defined(VK_VERSION_1_2) */
-#if defined(VK_VERSION_1_3)
-PFN_vkCmdBeginRendering vkCmdBeginRendering;
-PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;
-PFN_vkCmdBlitImage2 vkCmdBlitImage2;
-PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;
-PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;
-PFN_vkCmdCopyImage2 vkCmdCopyImage2;
-PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;
-PFN_vkCmdEndRendering vkCmdEndRendering;
-PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;
-PFN_vkCmdResetEvent2 vkCmdResetEvent2;
-PFN_vkCmdResolveImage2 vkCmdResolveImage2;
-PFN_vkCmdSetCullMode vkCmdSetCullMode;
-PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;
-PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;
-PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;
-PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;
-PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;
-PFN_vkCmdSetEvent2 vkCmdSetEvent2;
-PFN_vkCmdSetFrontFace vkCmdSetFrontFace;
-PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;
-PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;
-PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;
-PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;
-PFN_vkCmdSetStencilOp vkCmdSetStencilOp;
-PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;
-PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;
-PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;
-PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;
-PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;
-PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;
-PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;
-PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;
-PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;
-PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;
-PFN_vkGetPrivateData vkGetPrivateData;
-PFN_vkQueueSubmit2 vkQueueSubmit2;
-PFN_vkSetPrivateData vkSetPrivateData;
-#endif /* defined(VK_VERSION_1_3) */
-#if defined(VK_AMDX_shader_enqueue)
-PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;
-PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;
-PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;
-PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;
-PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;
-PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;
-PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;
-#endif /* defined(VK_AMDX_shader_enqueue) */
-#if defined(VK_AMD_buffer_marker)
-PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;
-#endif /* defined(VK_AMD_buffer_marker) */
-#if defined(VK_AMD_display_native_hdr)
-PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;
-#endif /* defined(VK_AMD_display_native_hdr) */
-#if defined(VK_AMD_draw_indirect_count)
-PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;
-PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;
-#endif /* defined(VK_AMD_draw_indirect_count) */
-#if defined(VK_AMD_shader_info)
-PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;
-#endif /* defined(VK_AMD_shader_info) */
-#if defined(VK_ANDROID_external_memory_android_hardware_buffer)
-PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
-PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
-#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */
-#if defined(VK_EXT_acquire_drm_display)
-PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;
-PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;
-#endif /* defined(VK_EXT_acquire_drm_display) */
-#if defined(VK_EXT_acquire_xlib_display)
-PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;
-PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;
-#endif /* defined(VK_EXT_acquire_xlib_display) */
-#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)
-PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;
-#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */
-#if defined(VK_EXT_buffer_device_address)
-PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;
-#endif /* defined(VK_EXT_buffer_device_address) */
-#if defined(VK_EXT_calibrated_timestamps)
-PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;
-PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;
-#endif /* defined(VK_EXT_calibrated_timestamps) */
-#if defined(VK_EXT_color_write_enable)
-PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;
-#endif /* defined(VK_EXT_color_write_enable) */
-#if defined(VK_EXT_conditional_rendering)
-PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
-PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
-#endif /* defined(VK_EXT_conditional_rendering) */
-#if defined(VK_EXT_debug_marker)
-PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;
-PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;
-PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;
-PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;
-PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;
-#endif /* defined(VK_EXT_debug_marker) */
-#if defined(VK_EXT_debug_report)
-PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;
-PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;
-PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;
-#endif /* defined(VK_EXT_debug_report) */
-#if defined(VK_EXT_debug_utils)
-PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;
-PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;
-PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;
-PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;
-PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;
-PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;
-PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT;
-PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;
-PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;
-PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;
-PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;
-#endif /* defined(VK_EXT_debug_utils) */
-#if defined(VK_EXT_depth_bias_control)
-PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;
-#endif /* defined(VK_EXT_depth_bias_control) */
-#if defined(VK_EXT_descriptor_buffer)
-PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;
-PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;
-PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;
-PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;
-PFN_vkGetDescriptorEXT vkGetDescriptorEXT;
-PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;
-PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;
-PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;
-PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;
-PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;
-#endif /* defined(VK_EXT_descriptor_buffer) */
-#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))
-PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;
-#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */
-#if defined(VK_EXT_device_fault)
-PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;
-#endif /* defined(VK_EXT_device_fault) */
-#if defined(VK_EXT_direct_mode_display)
-PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;
-#endif /* defined(VK_EXT_direct_mode_display) */
-#if defined(VK_EXT_directfb_surface)
-PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;
-PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;
-#endif /* defined(VK_EXT_directfb_surface) */
-#if defined(VK_EXT_discard_rectangles)
-PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;
-#endif /* defined(VK_EXT_discard_rectangles) */
-#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2
-PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;
-PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;
-#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */
-#if defined(VK_EXT_display_control)
-PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;
-PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;
-PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;
-PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;
-#endif /* defined(VK_EXT_display_control) */
-#if defined(VK_EXT_display_surface_counter)
-PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;
-#endif /* defined(VK_EXT_display_surface_counter) */
-#if defined(VK_EXT_external_memory_host)
-PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
-#endif /* defined(VK_EXT_external_memory_host) */
-#if defined(VK_EXT_full_screen_exclusive)
-PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;
-PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;
-PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;
-#endif /* defined(VK_EXT_full_screen_exclusive) */
-#if defined(VK_EXT_hdr_metadata)
-PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;
-#endif /* defined(VK_EXT_hdr_metadata) */
-#if defined(VK_EXT_headless_surface)
-PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;
-#endif /* defined(VK_EXT_headless_surface) */
-#if defined(VK_EXT_host_image_copy)
-PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;
-PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;
-PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;
-PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;
-#endif /* defined(VK_EXT_host_image_copy) */
-#if defined(VK_EXT_host_query_reset)
-PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
-#endif /* defined(VK_EXT_host_query_reset) */
-#if defined(VK_EXT_image_drm_format_modifier)
-PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;
-#endif /* defined(VK_EXT_image_drm_format_modifier) */
-#if defined(VK_EXT_line_rasterization)
-PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;
-#endif /* defined(VK_EXT_line_rasterization) */
-#if defined(VK_EXT_mesh_shader)
-PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;
-PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;
-PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;
-#endif /* defined(VK_EXT_mesh_shader) */
-#if defined(VK_EXT_metal_objects)
-PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;
-#endif /* defined(VK_EXT_metal_objects) */
-#if defined(VK_EXT_metal_surface)
-PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;
-#endif /* defined(VK_EXT_metal_surface) */
-#if defined(VK_EXT_multi_draw)
-PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;
-PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;
-#endif /* defined(VK_EXT_multi_draw) */
-#if defined(VK_EXT_opacity_micromap)
-PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;
-PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;
-PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;
-PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;
-PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;
-PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;
-PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;
-PFN_vkCopyMicromapEXT vkCopyMicromapEXT;
-PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;
-PFN_vkCreateMicromapEXT vkCreateMicromapEXT;
-PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;
-PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;
-PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;
-PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;
-#endif /* defined(VK_EXT_opacity_micromap) */
-#if defined(VK_EXT_pageable_device_local_memory)
-PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;
-#endif /* defined(VK_EXT_pageable_device_local_memory) */
-#if defined(VK_EXT_pipeline_properties)
-PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;
-#endif /* defined(VK_EXT_pipeline_properties) */
-#if defined(VK_EXT_private_data)
-PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;
-PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;
-PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;
-PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;
-#endif /* defined(VK_EXT_private_data) */
-#if defined(VK_EXT_sample_locations)
-PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;
-PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;
-#endif /* defined(VK_EXT_sample_locations) */
-#if defined(VK_EXT_shader_module_identifier)
-PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;
-PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;
-#endif /* defined(VK_EXT_shader_module_identifier) */
-#if defined(VK_EXT_shader_object)
-PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;
-PFN_vkCreateShadersEXT vkCreateShadersEXT;
-PFN_vkDestroyShaderEXT vkDestroyShaderEXT;
-PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;
-#endif /* defined(VK_EXT_shader_object) */
-#if defined(VK_EXT_swapchain_maintenance1)
-PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;
-#endif /* defined(VK_EXT_swapchain_maintenance1) */
-#if defined(VK_EXT_tooling_info)
-PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;
-#endif /* defined(VK_EXT_tooling_info) */
-#if defined(VK_EXT_transform_feedback)
-PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;
-PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;
-PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;
-PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;
-PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;
-PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;
-#endif /* defined(VK_EXT_transform_feedback) */
-#if defined(VK_EXT_validation_cache)
-PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;
-PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;
-PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;
-PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;
-#endif /* defined(VK_EXT_validation_cache) */
-#if defined(VK_FUCHSIA_buffer_collection)
-PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;
-PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;
-PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;
-PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;
-PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;
-#endif /* defined(VK_FUCHSIA_buffer_collection) */
-#if defined(VK_FUCHSIA_external_memory)
-PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;
-PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;
-#endif /* defined(VK_FUCHSIA_external_memory) */
-#if defined(VK_FUCHSIA_external_semaphore)
-PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;
-PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;
-#endif /* defined(VK_FUCHSIA_external_semaphore) */
-#if defined(VK_FUCHSIA_imagepipe_surface)
-PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;
-#endif /* defined(VK_FUCHSIA_imagepipe_surface) */
-#if defined(VK_GGP_stream_descriptor_surface)
-PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;
-#endif /* defined(VK_GGP_stream_descriptor_surface) */
-#if defined(VK_GOOGLE_display_timing)
-PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
-PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;
-#endif /* defined(VK_GOOGLE_display_timing) */
-#if defined(VK_HUAWEI_cluster_culling_shader)
-PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;
-PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;
-#endif /* defined(VK_HUAWEI_cluster_culling_shader) */
-#if defined(VK_HUAWEI_invocation_mask)
-PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;
-#endif /* defined(VK_HUAWEI_invocation_mask) */
-#if defined(VK_HUAWEI_subpass_shading)
-PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;
-PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;
-#endif /* defined(VK_HUAWEI_subpass_shading) */
-#if defined(VK_INTEL_performance_query)
-PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;
-PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;
-PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;
-PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;
-PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;
-PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;
-PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;
-PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;
-PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;
-#endif /* defined(VK_INTEL_performance_query) */
-#if defined(VK_KHR_acceleration_structure)
-PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;
-PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;
-PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;
-PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;
-PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;
-PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;
-PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;
-PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;
-PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;
-PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;
-PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;
-PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;
-PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;
-PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;
-PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;
-PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;
-#endif /* defined(VK_KHR_acceleration_structure) */
-#if defined(VK_KHR_android_surface)
-PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
-#endif /* defined(VK_KHR_android_surface) */
-#if defined(VK_KHR_bind_memory2)
-PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
-PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
-#endif /* defined(VK_KHR_bind_memory2) */
-#if defined(VK_KHR_buffer_device_address)
-PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
-PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
-PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;
-#endif /* defined(VK_KHR_buffer_device_address) */
-#if defined(VK_KHR_cooperative_matrix)
-PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;
-#endif /* defined(VK_KHR_cooperative_matrix) */
-#if defined(VK_KHR_copy_commands2)
-PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;
-PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;
-PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;
-PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;
-PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;
-PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;
-#endif /* defined(VK_KHR_copy_commands2) */
-#if defined(VK_KHR_create_renderpass2)
-PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
-PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
-PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
-PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
-#endif /* defined(VK_KHR_create_renderpass2) */
-#if defined(VK_KHR_deferred_host_operations)
-PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;
-PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;
-PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;
-PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;
-PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;
-#endif /* defined(VK_KHR_deferred_host_operations) */
-#if defined(VK_KHR_descriptor_update_template)
-PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
-PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
-PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
-#endif /* defined(VK_KHR_descriptor_update_template) */
-#if defined(VK_KHR_device_group)
-PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;
-PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;
-PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;
-#endif /* defined(VK_KHR_device_group) */
-#if defined(VK_KHR_device_group_creation)
-PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;
-#endif /* defined(VK_KHR_device_group_creation) */
-#if defined(VK_KHR_display)
-PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;
-PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;
-PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;
-PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;
-PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;
-PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;
-PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;
-#endif /* defined(VK_KHR_display) */
-#if defined(VK_KHR_display_swapchain)
-PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;
-#endif /* defined(VK_KHR_display_swapchain) */
-#if defined(VK_KHR_draw_indirect_count)
-PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;
-PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;
-#endif /* defined(VK_KHR_draw_indirect_count) */
-#if defined(VK_KHR_dynamic_rendering)
-PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;
-PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;
-#endif /* defined(VK_KHR_dynamic_rendering) */
-#if defined(VK_KHR_external_fence_capabilities)
-PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;
-#endif /* defined(VK_KHR_external_fence_capabilities) */
-#if defined(VK_KHR_external_fence_fd)
-PFN_vkGetFenceFdKHR vkGetFenceFdKHR;
-PFN_vkImportFenceFdKHR vkImportFenceFdKHR;
-#endif /* defined(VK_KHR_external_fence_fd) */
-#if defined(VK_KHR_external_fence_win32)
-PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;
-PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;
-#endif /* defined(VK_KHR_external_fence_win32) */
-#if defined(VK_KHR_external_memory_capabilities)
-PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;
-#endif /* defined(VK_KHR_external_memory_capabilities) */
-#if defined(VK_KHR_external_memory_fd)
-PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;
-PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;
-#endif /* defined(VK_KHR_external_memory_fd) */
-#if defined(VK_KHR_external_memory_win32)
-PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;
-PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;
-#endif /* defined(VK_KHR_external_memory_win32) */
-#if defined(VK_KHR_external_semaphore_capabilities)
-PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;
-#endif /* defined(VK_KHR_external_semaphore_capabilities) */
-#if defined(VK_KHR_external_semaphore_fd)
-PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;
-PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
-#endif /* defined(VK_KHR_external_semaphore_fd) */
-#if defined(VK_KHR_external_semaphore_win32)
-PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;
-PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;
-#endif /* defined(VK_KHR_external_semaphore_win32) */
-#if defined(VK_KHR_fragment_shading_rate)
-PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
-PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;
-#endif /* defined(VK_KHR_fragment_shading_rate) */
-#if defined(VK_KHR_get_display_properties2)
-PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;
-PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;
-PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;
-PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;
-#endif /* defined(VK_KHR_get_display_properties2) */
-#if defined(VK_KHR_get_memory_requirements2)
-PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
-PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
-PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;
-#endif /* defined(VK_KHR_get_memory_requirements2) */
-#if defined(VK_KHR_get_physical_device_properties2)
-PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;
-PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;
-PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;
-PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
-PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;
-PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;
-PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;
-#endif /* defined(VK_KHR_get_physical_device_properties2) */
-#if defined(VK_KHR_get_surface_capabilities2)
-PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;
-PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;
-#endif /* defined(VK_KHR_get_surface_capabilities2) */
-#if defined(VK_KHR_maintenance1)
-PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;
-#endif /* defined(VK_KHR_maintenance1) */
-#if defined(VK_KHR_maintenance3)
-PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
-#endif /* defined(VK_KHR_maintenance3) */
-#if defined(VK_KHR_maintenance4)
-PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;
-PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;
-PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;
-#endif /* defined(VK_KHR_maintenance4) */
-#if defined(VK_KHR_maintenance5)
-PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;
-PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;
-PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;
-PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;
-#endif /* defined(VK_KHR_maintenance5) */
-#if defined(VK_KHR_map_memory2)
-PFN_vkMapMemory2KHR vkMapMemory2KHR;
-PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;
-#endif /* defined(VK_KHR_map_memory2) */
-#if defined(VK_KHR_performance_query)
-PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;
-PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;
-PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;
-PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;
-#endif /* defined(VK_KHR_performance_query) */
-#if defined(VK_KHR_pipeline_executable_properties)
-PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;
-PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;
-PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;
-#endif /* defined(VK_KHR_pipeline_executable_properties) */
-#if defined(VK_KHR_present_wait)
-PFN_vkWaitForPresentKHR vkWaitForPresentKHR;
-#endif /* defined(VK_KHR_present_wait) */
-#if defined(VK_KHR_push_descriptor)
-PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
-#endif /* defined(VK_KHR_push_descriptor) */
-#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)
-PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;
-#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_ray_tracing_pipeline)
-PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;
-PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;
-PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;
-PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;
-PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
-PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;
-PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;
-#endif /* defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_sampler_ycbcr_conversion)
-PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
-PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
-#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */
-#if defined(VK_KHR_shared_presentable_image)
-PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;
-#endif /* defined(VK_KHR_shared_presentable_image) */
-#if defined(VK_KHR_surface)
-PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;
-PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
-PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;
-PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;
-PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;
-#endif /* defined(VK_KHR_surface) */
-#if defined(VK_KHR_swapchain)
-PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
-PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
-PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
-PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
-PFN_vkQueuePresentKHR vkQueuePresentKHR;
-#endif /* defined(VK_KHR_swapchain) */
-#if defined(VK_KHR_synchronization2)
-PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;
-PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;
-PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;
-PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;
-PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;
-PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
-#endif /* defined(VK_KHR_synchronization2) */
-#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker)
-PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */
-#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints)
-PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_KHR_timeline_semaphore)
-PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
-PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
-PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
-#endif /* defined(VK_KHR_timeline_semaphore) */
-#if defined(VK_KHR_video_decode_queue)
-PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;
-#endif /* defined(VK_KHR_video_decode_queue) */
-#if defined(VK_KHR_video_encode_queue)
-PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;
-PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;
-PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;
-#endif /* defined(VK_KHR_video_encode_queue) */
-#if defined(VK_KHR_video_queue)
-PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;
-PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;
-PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;
-PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;
-PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;
-PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;
-PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;
-PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;
-PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;
-PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;
-PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;
-PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;
-#endif /* defined(VK_KHR_video_queue) */
-#if defined(VK_KHR_wayland_surface)
-PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;
-PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR;
-#endif /* defined(VK_KHR_wayland_surface) */
-#if defined(VK_KHR_win32_surface)
-PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;
-PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;
-#endif /* defined(VK_KHR_win32_surface) */
-#if defined(VK_KHR_xcb_surface)
-PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;
-PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;
-#endif /* defined(VK_KHR_xcb_surface) */
-#if defined(VK_KHR_xlib_surface)
-PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;
-PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;
-#endif /* defined(VK_KHR_xlib_surface) */
-#if defined(VK_MVK_ios_surface)
-PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;
-#endif /* defined(VK_MVK_ios_surface) */
-#if defined(VK_MVK_macos_surface)
-PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;
-#endif /* defined(VK_MVK_macos_surface) */
-#if defined(VK_NN_vi_surface)
-PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;
-#endif /* defined(VK_NN_vi_surface) */
-#if defined(VK_NVX_binary_import)
-PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;
-PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;
-PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;
-PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;
-PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;
-#endif /* defined(VK_NVX_binary_import) */
-#if defined(VK_NVX_image_view_handle)
-PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;
-PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;
-#endif /* defined(VK_NVX_image_view_handle) */
-#if defined(VK_NV_acquire_winrt_display)
-PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;
-PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;
-#endif /* defined(VK_NV_acquire_winrt_display) */
-#if defined(VK_NV_clip_space_w_scaling)
-PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;
-#endif /* defined(VK_NV_clip_space_w_scaling) */
-#if defined(VK_NV_cooperative_matrix)
-PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;
-#endif /* defined(VK_NV_cooperative_matrix) */
-#if defined(VK_NV_copy_memory_indirect)
-PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;
-PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;
-#endif /* defined(VK_NV_copy_memory_indirect) */
-#if defined(VK_NV_coverage_reduction_mode)
-PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;
-#endif /* defined(VK_NV_coverage_reduction_mode) */
-#if defined(VK_NV_cuda_kernel_launch)
-PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;
-PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;
-PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;
-PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;
-PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;
-PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;
-#endif /* defined(VK_NV_cuda_kernel_launch) */
-#if defined(VK_NV_device_diagnostic_checkpoints)
-PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
-PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
-#endif /* defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_NV_device_generated_commands)
-PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;
-PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;
-PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV; -PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV; -PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV; -PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) -PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV; -PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV; -PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_capabilities) -PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV; -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_external_memory_rdma) -PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV; -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) -PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) -PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV; -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) -PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV; -PFN_vkLatencySleepNV vkLatencySleepNV; -PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV; -PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV; -PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV; -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) -PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV; -PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV; -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) -PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV; -PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV; -PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV; -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) -PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV; -PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV; -PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV; -PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV; -PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV; -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) -PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV; -PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV; -PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV; -PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV; -PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV; -PFN_vkCompileDeferredNV vkCompileDeferredNV; -PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV; -PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV; -PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV; -PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV; -PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV; -PFN_vkGetRayTracingShaderGroupHandlesNV 
vkGetRayTracingShaderGroupHandlesNV; -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 -PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV; -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) -PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV; -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) -PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV; -PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV; -PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV; -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) -PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM; -PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM; -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) -PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX; -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_QNX_screen_surface) -PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX; -PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX; -#endif /* defined(VK_QNX_screen_surface) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) -PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE; -PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE; -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; -PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; -PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; -PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; -PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; -PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; -PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT; -PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; -PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT; -PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; -PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; -PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT; -PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT; -PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT; -PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT; -PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT; -PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT; -PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT; -PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT; -PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT; -PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT; 
-PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT; -PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT; -PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT; -PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT; -PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT; -PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT; -PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT; -PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT; -PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT; -PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT; -PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT; -PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT; -PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT; -PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT; -PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) -PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) -PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) -PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV; -PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) -PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV; -PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV; -PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) -PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) 
-PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) -PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) -PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT; -#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) -PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT; -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) -PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) -PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR; -PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR; -PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -/* VOLK_GENERATE_PROTOTYPES_C */ - -#ifdef __GNUC__ -# pragma GCC visibility pop -#endif - -#ifdef __cplusplus -} -#endif -/* clang-format on */ diff --git a/third_party/volk.h b/third_party/volk.h deleted file mode 100644 index 47fbed2..0000000 --- a/third_party/volk.h +++ /dev/null @@ -1,1985 +0,0 @@ -/** - * volk - * - * Copyright (C) 2018-2023, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com) - * Report bugs and download new versions at https://github.com/zeux/volk - * - * This library is distributed under the MIT License. See notice at the end of this file. 
- */
-/* clang-format off */
-#ifndef VOLK_H_
-#define VOLK_H_
-
-#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)
-# error To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h
-#endif
-
-/* VOLK_GENERATE_VERSION_DEFINE */
-#define VOLK_HEADER_VERSION 270
-/* VOLK_GENERATE_VERSION_DEFINE */
-
-#ifndef VK_NO_PROTOTYPES
-# define VK_NO_PROTOTYPES
-#endif
-
-#ifndef VULKAN_H_
-# ifdef VOLK_VULKAN_H_PATH
-# include VOLK_VULKAN_H_PATH
-# elif defined(VK_USE_PLATFORM_WIN32_KHR)
-# include <vulkan/vk_platform.h>
-# include <vulkan/vulkan_core.h>
-
- /* When VK_USE_PLATFORM_WIN32_KHR is defined, instead of including vulkan.h directly, we include individual parts of the SDK
- * This is necessary to avoid including <windows.h> which is very heavy - it takes 200ms to parse without WIN32_LEAN_AND_MEAN
- * and 100ms to parse with it. vulkan_win32.h only needs a few symbols that are easy to redefine ourselves.
- */
- typedef unsigned long DWORD;
- typedef const wchar_t* LPCWSTR;
- typedef void* HANDLE;
- typedef struct HINSTANCE__* HINSTANCE;
- typedef struct HWND__* HWND;
- typedef struct HMONITOR__* HMONITOR;
- typedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;
-
-# include <vulkan/vulkan_win32.h>
-
-# ifdef VK_ENABLE_BETA_EXTENSIONS
-# include <vulkan/vulkan_beta.h>
-# endif
-# else
-# include <vulkan/vulkan.h>
-# endif
-#endif
-
-/* Disable several extensions on earlier SDKs because later SDKs introduce a backwards incompatible change to function signatures */
-#if VK_HEADER_VERSION < 140
-# undef VK_NVX_image_view_handle
-#endif
-#if VK_HEADER_VERSION < 184
-# undef VK_HUAWEI_subpass_shading
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct VolkDeviceTable;
-
-/**
- * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.
- *
- * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.
- */
-VkResult volkInitialize(void);
-
-/**
- * Initialize library by providing a custom handler to load global symbols.
- *
- * This function can be used instead of volkInitialize.
- * The handler function pointer will be asked to load global Vulkan symbols which require no instance
- * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).
- */
-void volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);
-
-/**
- * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.
- */
-void volkFinalize(void);
-
-/**
- * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported
- *
- * Returns 0 if volkInitialize wasn't called or failed.
- */
-uint32_t volkGetInstanceVersion(void);
-
-/**
- * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.
- */
-void volkLoadInstance(VkInstance instance);
-
-/**
- * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.
- * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.
- */
-void volkLoadInstanceOnly(VkInstance instance);
-
-/**
- * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.
- *
- * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.
- */
-void volkLoadDevice(VkDevice device);
-
-/**
- * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),
- * or VK_NULL_HANDLE if volkLoadInstance() has not been called.
- */
-VkInstance volkGetLoadedInstance(void);
-
-/**
- * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),
- * or VK_NULL_HANDLE if volkLoadDevice() has not been called.
- */
-VkDevice volkGetLoadedDevice(void);
-
-/**
- * Load function pointers using application-created VkDevice into a table.
- * Application should use function pointers from that table instead of using global function pointers.
- */
-void volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);
-
-/**
- * Device-specific function pointer table
- */
-struct VolkDeviceTable
-{
- /* VOLK_GENERATE_DEVICE_TABLE */
-#if defined(VK_VERSION_1_0)
- PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;
- PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;
- PFN_vkAllocateMemory vkAllocateMemory;
- PFN_vkBeginCommandBuffer vkBeginCommandBuffer;
- PFN_vkBindBufferMemory vkBindBufferMemory;
- PFN_vkBindImageMemory vkBindImageMemory;
- PFN_vkCmdBeginQuery vkCmdBeginQuery;
- PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;
- PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;
- PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;
- PFN_vkCmdBindPipeline vkCmdBindPipeline;
- PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;
- PFN_vkCmdBlitImage vkCmdBlitImage;
- PFN_vkCmdClearAttachments vkCmdClearAttachments;
- PFN_vkCmdClearColorImage vkCmdClearColorImage;
- PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;
- PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
- PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;
- PFN_vkCmdCopyImage vkCmdCopyImage;
- PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;
- PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;
- PFN_vkCmdDispatch vkCmdDispatch;
- PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;
- PFN_vkCmdDraw vkCmdDraw;
- PFN_vkCmdDrawIndexed vkCmdDrawIndexed;
- PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;
- PFN_vkCmdDrawIndirect vkCmdDrawIndirect;
- PFN_vkCmdEndQuery vkCmdEndQuery;
- PFN_vkCmdEndRenderPass vkCmdEndRenderPass;
- PFN_vkCmdExecuteCommands vkCmdExecuteCommands;
- PFN_vkCmdFillBuffer vkCmdFillBuffer;
- PFN_vkCmdNextSubpass vkCmdNextSubpass;
- PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
- PFN_vkCmdPushConstants vkCmdPushConstants;
- PFN_vkCmdResetEvent vkCmdResetEvent;
- PFN_vkCmdResetQueryPool vkCmdResetQueryPool;
- PFN_vkCmdResolveImage vkCmdResolveImage;
- PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
- PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
- PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
- PFN_vkCmdSetEvent vkCmdSetEvent;
- PFN_vkCmdSetLineWidth vkCmdSetLineWidth;
- PFN_vkCmdSetScissor vkCmdSetScissor;
- PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
- PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
- PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
- PFN_vkCmdSetViewport vkCmdSetViewport;
- PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;
- PFN_vkCmdWaitEvents vkCmdWaitEvents;
- PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;
- PFN_vkCreateBuffer vkCreateBuffer;
- PFN_vkCreateBufferView vkCreateBufferView;
- PFN_vkCreateCommandPool vkCreateCommandPool;
- PFN_vkCreateComputePipelines vkCreateComputePipelines;
- PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
- PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
- PFN_vkCreateEvent vkCreateEvent;
- PFN_vkCreateFence vkCreateFence;
- PFN_vkCreateFramebuffer vkCreateFramebuffer;
- PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
- PFN_vkCreateImage vkCreateImage;
- PFN_vkCreateImageView vkCreateImageView;
- PFN_vkCreatePipelineCache vkCreatePipelineCache;
- PFN_vkCreatePipelineLayout vkCreatePipelineLayout;
- PFN_vkCreateQueryPool vkCreateQueryPool;
- PFN_vkCreateRenderPass vkCreateRenderPass;
- PFN_vkCreateSampler vkCreateSampler;
- PFN_vkCreateSemaphore vkCreateSemaphore;
- PFN_vkCreateShaderModule vkCreateShaderModule;
- PFN_vkDestroyBuffer vkDestroyBuffer;
- PFN_vkDestroyBufferView vkDestroyBufferView;
- PFN_vkDestroyCommandPool vkDestroyCommandPool;
- PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
- PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
- PFN_vkDestroyDevice vkDestroyDevice;
- PFN_vkDestroyEvent vkDestroyEvent;
- PFN_vkDestroyFence vkDestroyFence;
- PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
- PFN_vkDestroyImage vkDestroyImage;
- PFN_vkDestroyImageView vkDestroyImageView;
- PFN_vkDestroyPipeline vkDestroyPipeline;
- PFN_vkDestroyPipelineCache vkDestroyPipelineCache;
- PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;
- PFN_vkDestroyQueryPool vkDestroyQueryPool;
- PFN_vkDestroyRenderPass vkDestroyRenderPass;
- PFN_vkDestroySampler vkDestroySampler;
- PFN_vkDestroySemaphore vkDestroySemaphore;
- PFN_vkDestroyShaderModule vkDestroyShaderModule;
- PFN_vkDeviceWaitIdle vkDeviceWaitIdle;
- PFN_vkEndCommandBuffer vkEndCommandBuffer;
- PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
- PFN_vkFreeCommandBuffers vkFreeCommandBuffers;
- PFN_vkFreeDescriptorSets vkFreeDescriptorSets;
- PFN_vkFreeMemory vkFreeMemory;
- PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
- PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;
- PFN_vkGetDeviceQueue vkGetDeviceQueue;
- PFN_vkGetEventStatus vkGetEventStatus;
- PFN_vkGetFenceStatus vkGetFenceStatus;
- PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
- PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;
- PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;
- PFN_vkGetPipelineCacheData vkGetPipelineCacheData;
- PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
- PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;
- PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
- PFN_vkMapMemory vkMapMemory;
- PFN_vkMergePipelineCaches vkMergePipelineCaches;
- PFN_vkQueueBindSparse vkQueueBindSparse;
- PFN_vkQueueSubmit vkQueueSubmit;
- PFN_vkQueueWaitIdle vkQueueWaitIdle;
- PFN_vkResetCommandBuffer vkResetCommandBuffer;
- PFN_vkResetCommandPool vkResetCommandPool;
- PFN_vkResetDescriptorPool vkResetDescriptorPool;
- PFN_vkResetEvent vkResetEvent;
- PFN_vkResetFences vkResetFences;
- PFN_vkSetEvent vkSetEvent;
- PFN_vkUnmapMemory vkUnmapMemory;
- PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
- PFN_vkWaitForFences vkWaitForFences;
-#endif /* defined(VK_VERSION_1_0) */
-#if defined(VK_VERSION_1_1)
- PFN_vkBindBufferMemory2 vkBindBufferMemory2;
- PFN_vkBindImageMemory2 vkBindImageMemory2;
- PFN_vkCmdDispatchBase vkCmdDispatchBase;
- PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;
- PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;
- PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;
- PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;
- PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;
- PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
- PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;
- PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;
- PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;
- PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
- PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;
- PFN_vkTrimCommandPool vkTrimCommandPool;
- PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;
-#endif /* defined(VK_VERSION_1_1) */
-#if defined(VK_VERSION_1_2)
- PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;
- PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;
- PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;
- PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;
- PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;
- PFN_vkCreateRenderPass2 vkCreateRenderPass2;
- PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;
- PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;
- PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;
- PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;
- PFN_vkResetQueryPool vkResetQueryPool;
- PFN_vkSignalSemaphore vkSignalSemaphore;
- PFN_vkWaitSemaphores vkWaitSemaphores;
-#endif /* defined(VK_VERSION_1_2) */
-#if defined(VK_VERSION_1_3)
- PFN_vkCmdBeginRendering vkCmdBeginRendering;
- PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;
- PFN_vkCmdBlitImage2 vkCmdBlitImage2;
- PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;
- PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;
- PFN_vkCmdCopyImage2 vkCmdCopyImage2;
- PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;
- PFN_vkCmdEndRendering vkCmdEndRendering;
- PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;
- PFN_vkCmdResetEvent2 vkCmdResetEvent2;
- PFN_vkCmdResolveImage2 vkCmdResolveImage2;
- PFN_vkCmdSetCullMode vkCmdSetCullMode;
- PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;
- PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;
- PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;
- PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;
- PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;
- PFN_vkCmdSetEvent2 vkCmdSetEvent2;
- PFN_vkCmdSetFrontFace vkCmdSetFrontFace;
- PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;
- PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;
- PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;
- PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;
- PFN_vkCmdSetStencilOp vkCmdSetStencilOp;
- PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;
- PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;
- PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;
- PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;
- PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;
- PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;
- PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;
- PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;
- PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;
- PFN_vkGetPrivateData vkGetPrivateData;
- PFN_vkQueueSubmit2 vkQueueSubmit2;
- PFN_vkSetPrivateData vkSetPrivateData;
-#endif /* defined(VK_VERSION_1_3) */
-#if defined(VK_AMDX_shader_enqueue)
- PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;
- PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;
- PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;
- PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;
- PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;
- PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;
- PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;
-#endif /* defined(VK_AMDX_shader_enqueue) */
-#if defined(VK_AMD_buffer_marker)
- PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;
-#endif /* defined(VK_AMD_buffer_marker) */
-#if defined(VK_AMD_display_native_hdr)
- PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;
-#endif /* defined(VK_AMD_display_native_hdr) */
-#if defined(VK_AMD_draw_indirect_count)
- PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;
- PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;
-#endif /* defined(VK_AMD_draw_indirect_count) */
-#if defined(VK_AMD_shader_info)
- PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;
-#endif /* defined(VK_AMD_shader_info) */
-#if defined(VK_ANDROID_external_memory_android_hardware_buffer)
- PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
- PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
-#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */
-#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)
- PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;
-#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */
-#if defined(VK_EXT_buffer_device_address)
- PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;
-#endif /* defined(VK_EXT_buffer_device_address) */
-#if defined(VK_EXT_calibrated_timestamps)
- PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;
-#endif /* defined(VK_EXT_calibrated_timestamps) */
-#if defined(VK_EXT_color_write_enable)
- PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;
-#endif /* defined(VK_EXT_color_write_enable) */
-#if defined(VK_EXT_conditional_rendering)
- PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
- PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
-#endif /* defined(VK_EXT_conditional_rendering) */
-#if defined(VK_EXT_debug_marker)
- PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;
- PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;
- PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;
- PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;
- PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;
-#endif /* defined(VK_EXT_debug_marker) */
-#if defined(VK_EXT_depth_bias_control)
- PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;
-#endif /* defined(VK_EXT_depth_bias_control) */
-#if defined(VK_EXT_descriptor_buffer)
- PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;
- PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;
- PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;
- PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;
- PFN_vkGetDescriptorEXT vkGetDescriptorEXT;
- PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;
- PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;
- PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;
- PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;
- PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;
-#endif /* defined(VK_EXT_descriptor_buffer) */
-#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))
- PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;
-#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */
-#if defined(VK_EXT_device_fault)
- PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;
-#endif /* defined(VK_EXT_device_fault) */
-#if defined(VK_EXT_discard_rectangles)
- PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;
-#endif /* defined(VK_EXT_discard_rectangles) */
-#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2
- PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;
- PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;
-#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */
-#if defined(VK_EXT_display_control)
- PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;
- PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;
- PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;
- PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;
-#endif /* defined(VK_EXT_display_control) */
-#if defined(VK_EXT_external_memory_host)
- PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
-#endif /* defined(VK_EXT_external_memory_host) */
-#if defined(VK_EXT_full_screen_exclusive)
- PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;
- PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;
-#endif /* defined(VK_EXT_full_screen_exclusive) */
-#if defined(VK_EXT_hdr_metadata)
- PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;
-#endif /* defined(VK_EXT_hdr_metadata) */
-#if defined(VK_EXT_host_image_copy)
- PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;
- PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;
- PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;
- PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;
-#endif /* defined(VK_EXT_host_image_copy) */
-#if defined(VK_EXT_host_query_reset)
- PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
-#endif /* defined(VK_EXT_host_query_reset) */
-#if defined(VK_EXT_image_drm_format_modifier)
- PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;
-#endif /* defined(VK_EXT_image_drm_format_modifier) */
-#if defined(VK_EXT_line_rasterization)
- PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;
-#endif /* defined(VK_EXT_line_rasterization) */
-#if defined(VK_EXT_mesh_shader)
- PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;
- PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;
- PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;
-#endif /* defined(VK_EXT_mesh_shader) */
-#if defined(VK_EXT_metal_objects)
- PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;
-#endif /* defined(VK_EXT_metal_objects) */
-#if defined(VK_EXT_multi_draw)
- PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;
- PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;
-#endif /* defined(VK_EXT_multi_draw) */
-#if defined(VK_EXT_opacity_micromap)
- PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;
- PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;
- PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;
- PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;
- PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;
- PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;
- PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;
- PFN_vkCopyMicromapEXT vkCopyMicromapEXT;
- PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;
- PFN_vkCreateMicromapEXT vkCreateMicromapEXT;
- PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;
- PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;
- PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;
- PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;
-#endif /* defined(VK_EXT_opacity_micromap) */
-#if defined(VK_EXT_pageable_device_local_memory)
- PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;
-#endif /* defined(VK_EXT_pageable_device_local_memory) */
-#if defined(VK_EXT_pipeline_properties)
- PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;
-#endif /* defined(VK_EXT_pipeline_properties) */
-#if defined(VK_EXT_private_data)
- PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;
- PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;
- PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;
- PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;
-#endif /* defined(VK_EXT_private_data) */
-#if defined(VK_EXT_sample_locations)
- PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;
-#endif /* defined(VK_EXT_sample_locations) */
-#if defined(VK_EXT_shader_module_identifier)
- PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;
- PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;
-#endif /* defined(VK_EXT_shader_module_identifier) */
-#if defined(VK_EXT_shader_object)
- PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;
- PFN_vkCreateShadersEXT vkCreateShadersEXT;
- PFN_vkDestroyShaderEXT vkDestroyShaderEXT;
- PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;
-#endif /* defined(VK_EXT_shader_object) */
-#if defined(VK_EXT_swapchain_maintenance1)
- PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;
-#endif /* defined(VK_EXT_swapchain_maintenance1) */
-#if defined(VK_EXT_transform_feedback)
- PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;
- PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;
- PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;
- PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;
- PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;
- PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;
-#endif /* defined(VK_EXT_transform_feedback) */
-#if defined(VK_EXT_validation_cache)
- PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;
- PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;
- PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;
- PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;
-#endif /* defined(VK_EXT_validation_cache) */
-#if defined(VK_FUCHSIA_buffer_collection)
- PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;
- PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;
- PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;
- PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;
- PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;
-#endif /* defined(VK_FUCHSIA_buffer_collection) */
-#if defined(VK_FUCHSIA_external_memory)
- PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;
- PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;
-#endif /* defined(VK_FUCHSIA_external_memory) */
-#if defined(VK_FUCHSIA_external_semaphore)
- PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;
- PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;
-#endif /* defined(VK_FUCHSIA_external_semaphore) */
-#if defined(VK_GOOGLE_display_timing)
- PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
- PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;
-#endif /* defined(VK_GOOGLE_display_timing) */
-#if defined(VK_HUAWEI_cluster_culling_shader)
- PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;
- PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;
-#endif /* defined(VK_HUAWEI_cluster_culling_shader) */
-#if defined(VK_HUAWEI_invocation_mask)
- PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;
-#endif /* defined(VK_HUAWEI_invocation_mask) */
-#if defined(VK_HUAWEI_subpass_shading)
- PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;
- PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;
-#endif /* defined(VK_HUAWEI_subpass_shading) */
-#if defined(VK_INTEL_performance_query)
- PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;
- PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;
- PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;
- PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;
- PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;
- PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;
- PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;
- PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;
- PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;
-#endif /* defined(VK_INTEL_performance_query) */
-#if defined(VK_KHR_acceleration_structure)
- PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;
- PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;
- PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;
- PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;
- PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;
- PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;
- PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;
- PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;
- PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;
- PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;
- PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;
- PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;
- PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;
- PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;
- PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;
- PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;
-#endif /* defined(VK_KHR_acceleration_structure) */
-#if defined(VK_KHR_bind_memory2)
- PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
- PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
-#endif /* defined(VK_KHR_bind_memory2) */
-#if defined(VK_KHR_buffer_device_address)
- PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
- PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
- PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;
-#endif /* defined(VK_KHR_buffer_device_address) */
-#if defined(VK_KHR_copy_commands2)
- PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;
- PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;
- PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;
- PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;
- PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;
- PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;
-#endif /* defined(VK_KHR_copy_commands2) */
-#if defined(VK_KHR_create_renderpass2)
- PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
- PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
- PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
- PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
-#endif /* defined(VK_KHR_create_renderpass2) */
-#if defined(VK_KHR_deferred_host_operations)
- PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;
- PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;
- PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;
- PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;
- PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;
-#endif /* defined(VK_KHR_deferred_host_operations) */
-#if defined(VK_KHR_descriptor_update_template)
- PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
- PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
- PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
-#endif /* defined(VK_KHR_descriptor_update_template) */
-#if defined(VK_KHR_device_group)
- PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;
- PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;
- PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;
-#endif /* defined(VK_KHR_device_group) */
-#if defined(VK_KHR_display_swapchain)
- PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;
-#endif /* defined(VK_KHR_display_swapchain) */
-#if defined(VK_KHR_draw_indirect_count)
- PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;
- PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;
-#endif /* defined(VK_KHR_draw_indirect_count) */
-#if defined(VK_KHR_dynamic_rendering)
- PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;
- PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;
-#endif /* defined(VK_KHR_dynamic_rendering) */
-#if defined(VK_KHR_external_fence_fd)
- PFN_vkGetFenceFdKHR vkGetFenceFdKHR;
- PFN_vkImportFenceFdKHR vkImportFenceFdKHR;
-#endif /* defined(VK_KHR_external_fence_fd) */
-#if defined(VK_KHR_external_fence_win32)
- PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;
- PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;
-#endif /* defined(VK_KHR_external_fence_win32) */
-#if defined(VK_KHR_external_memory_fd)
- PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;
- PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;
-#endif /* defined(VK_KHR_external_memory_fd) */
-#if defined(VK_KHR_external_memory_win32)
- PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;
- PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;
-#endif /* defined(VK_KHR_external_memory_win32) */
-#if defined(VK_KHR_external_semaphore_fd)
- PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;
- PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
-#endif /* defined(VK_KHR_external_semaphore_fd) */
-#if defined(VK_KHR_external_semaphore_win32)
- PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;
- PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;
-#endif /* defined(VK_KHR_external_semaphore_win32) */
-#if defined(VK_KHR_fragment_shading_rate)
- PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
-#endif /* defined(VK_KHR_fragment_shading_rate) */
-#if defined(VK_KHR_get_memory_requirements2)
- PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
- PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
- PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;
-#endif /* defined(VK_KHR_get_memory_requirements2) */
-#if defined(VK_KHR_maintenance1)
- PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;
-#endif /* defined(VK_KHR_maintenance1) */
-#if defined(VK_KHR_maintenance3)
- PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
-#endif /* defined(VK_KHR_maintenance3) */
-#if defined(VK_KHR_maintenance4)
- PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;
- PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;
- PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;
-#endif /* defined(VK_KHR_maintenance4) */
-#if defined(VK_KHR_maintenance5)
- PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;
- PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;
- PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;
- PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;
-#endif /* defined(VK_KHR_maintenance5) */
-#if defined(VK_KHR_map_memory2)
- PFN_vkMapMemory2KHR vkMapMemory2KHR;
- PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;
-#endif /* defined(VK_KHR_map_memory2) */
-#if defined(VK_KHR_performance_query)
- PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;
- PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;
-#endif /* defined(VK_KHR_performance_query) */
-#if defined(VK_KHR_pipeline_executable_properties)
- PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;
- PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;
- PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;
-#endif /* defined(VK_KHR_pipeline_executable_properties) */
-#if defined(VK_KHR_present_wait)
- PFN_vkWaitForPresentKHR vkWaitForPresentKHR;
-#endif /* defined(VK_KHR_present_wait) */
-#if defined(VK_KHR_push_descriptor)
- PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
-#endif /* defined(VK_KHR_push_descriptor) */
-#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)
- PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;
-#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_ray_tracing_pipeline)
- PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;
- PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;
- PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;
- PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;
- PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
- PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;
- PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;
-#endif /* defined(VK_KHR_ray_tracing_pipeline) */
-#if defined(VK_KHR_sampler_ycbcr_conversion)
- PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
- PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
-#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */
-#if defined(VK_KHR_shared_presentable_image)
- PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;
-#endif /* defined(VK_KHR_shared_presentable_image) */
-#if defined(VK_KHR_swapchain)
- PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
- PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
- PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
- PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
- PFN_vkQueuePresentKHR vkQueuePresentKHR;
-#endif /* defined(VK_KHR_swapchain) */
-#if defined(VK_KHR_synchronization2)
- PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;
- PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;
- PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;
- PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;
- PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;
- PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
-#endif /* defined(VK_KHR_synchronization2) */
-#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker)
- PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */
-#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints)
- PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;
-#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_KHR_timeline_semaphore)
- PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
- PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
- PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
-#endif /* defined(VK_KHR_timeline_semaphore) */
-#if defined(VK_KHR_video_decode_queue)
- PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;
-#endif /* defined(VK_KHR_video_decode_queue) */
-#if defined(VK_KHR_video_encode_queue)
- PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;
- PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;
-#endif /* defined(VK_KHR_video_encode_queue) */
-#if defined(VK_KHR_video_queue)
- PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;
- PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;
- PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;
- PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;
- PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;
- PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;
- PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;
- PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;
- PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;
- PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;
-#endif /* defined(VK_KHR_video_queue) */
-#if defined(VK_NVX_binary_import)
- PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;
- PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;
- PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;
- PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;
- PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;
-#endif /* defined(VK_NVX_binary_import) */
-#if defined(VK_NVX_image_view_handle)
- PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;
- PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;
-#endif /* defined(VK_NVX_image_view_handle) */
-#if defined(VK_NV_clip_space_w_scaling)
- PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;
-#endif /* defined(VK_NV_clip_space_w_scaling) */
-#if defined(VK_NV_copy_memory_indirect)
- PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;
- PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;
-#endif /* defined(VK_NV_copy_memory_indirect) */
-#if defined(VK_NV_cuda_kernel_launch)
- PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;
- PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;
- PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;
- PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;
- PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;
- PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;
-#endif /* defined(VK_NV_cuda_kernel_launch) */
-#if defined(VK_NV_device_diagnostic_checkpoints)
- PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
- PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
-#endif /* defined(VK_NV_device_diagnostic_checkpoints) */
-#if defined(VK_NV_device_generated_commands)
- PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;
- PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;
- PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;
- PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;
- PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;
- PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;
-#endif /* defined(VK_NV_device_generated_commands) */
-#if defined(VK_NV_device_generated_commands_compute)
- PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;
- PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;
- PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;
-#endif /* defined(VK_NV_device_generated_commands_compute) */
-#if defined(VK_NV_external_memory_rdma)
- PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;
-#endif /* defined(VK_NV_external_memory_rdma) */
-#if defined(VK_NV_external_memory_win32)
- PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;
-#endif /* defined(VK_NV_external_memory_win32) */
-#if defined(VK_NV_fragment_shading_rate_enums)
- PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;
-#endif /* defined(VK_NV_fragment_shading_rate_enums) */
-#if defined(VK_NV_low_latency2)
- PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;
- PFN_vkLatencySleepNV vkLatencySleepNV;
- PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;
- PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;
- PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;
-#endif /* defined(VK_NV_low_latency2) */
-#if defined(VK_NV_memory_decompression)
- PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;
- PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;
-#endif /* defined(VK_NV_memory_decompression) */
-#if defined(VK_NV_mesh_shader)
- PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;
- PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;
- PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;
-#endif /* defined(VK_NV_mesh_shader) */
-#if defined(VK_NV_optical_flow)
- PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;
- PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;
- PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;
- PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;
-#endif /* defined(VK_NV_optical_flow) */
-#if defined(VK_NV_ray_tracing)
- PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;
- PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;
- PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;
- PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;
- PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;
- PFN_vkCompileDeferredNV vkCompileDeferredNV;
- PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;
- PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;
- PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;
- PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;
- PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;
- PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;
-#endif /* defined(VK_NV_ray_tracing) */
-#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2
- PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;
-#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */
-#if defined(VK_NV_scissor_exclusive)
- PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;
-#endif /* defined(VK_NV_scissor_exclusive) */
-#if defined(VK_NV_shading_rate_image)
- PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;
- PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;
- PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;
-#endif /* defined(VK_NV_shading_rate_image) */
-#if defined(VK_QCOM_tile_properties)
- PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;
- PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;
-#endif /* defined(VK_QCOM_tile_properties) */
-#if defined(VK_QNX_external_memory_screen_buffer)
- PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;
-#endif /* defined(VK_QNX_external_memory_screen_buffer) */
-#if defined(VK_VALVE_descriptor_set_host_mapping)
- PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;
- PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;
-#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */
-#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))
- PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;
- PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;
- PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;
- PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;
- PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;
- PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;
- PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;
- PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;
- PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;
- PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;
- PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;
- PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;
-#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))
- PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;
- PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;
- PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;
- PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;
- PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;
-#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))
- PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;
- PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;
- PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;
- PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;
- PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;
- PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;
- PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;
- PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;
- PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;
- PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;
- PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;
- PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;
- PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;
- PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;
- PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;
- PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;
- PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;
- PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;
- PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;
- PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;
- PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;
-#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))
- PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))
- PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))
- PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;
- PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))
- PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;
- PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;
- PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))
- PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))
- PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))
- PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */
-#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1))
- PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;
-#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */
-#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))
- PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;
-#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */
-#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))
- PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;
-#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */
-#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template))
- PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
-#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
- PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;
- PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
- PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
- /*
VOLK_GENERATE_DEVICE_TABLE */ -}; - -/* VOLK_GENERATE_PROTOTYPES_H */ -#if defined(VK_VERSION_1_0) -extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; -extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; -extern PFN_vkAllocateMemory vkAllocateMemory; -extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; -extern PFN_vkBindBufferMemory vkBindBufferMemory; -extern PFN_vkBindImageMemory vkBindImageMemory; -extern PFN_vkCmdBeginQuery vkCmdBeginQuery; -extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; -extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; -extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; -extern PFN_vkCmdBindPipeline vkCmdBindPipeline; -extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; -extern PFN_vkCmdBlitImage vkCmdBlitImage; -extern PFN_vkCmdClearAttachments vkCmdClearAttachments; -extern PFN_vkCmdClearColorImage vkCmdClearColorImage; -extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; -extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; -extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; -extern PFN_vkCmdCopyImage vkCmdCopyImage; -extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; -extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; -extern PFN_vkCmdDispatch vkCmdDispatch; -extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; -extern PFN_vkCmdDraw vkCmdDraw; -extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed; -extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; -extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect; -extern PFN_vkCmdEndQuery vkCmdEndQuery; -extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass; -extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; -extern PFN_vkCmdFillBuffer vkCmdFillBuffer; -extern PFN_vkCmdNextSubpass vkCmdNextSubpass; -extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; -extern PFN_vkCmdPushConstants vkCmdPushConstants; -extern PFN_vkCmdResetEvent vkCmdResetEvent; -extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; -extern PFN_vkCmdResolveImage vkCmdResolveImage; -extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; -extern PFN_vkCmdSetDepthBias vkCmdSetDepthBias; -extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; -extern PFN_vkCmdSetEvent vkCmdSetEvent; -extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth; -extern PFN_vkCmdSetScissor vkCmdSetScissor; -extern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; -extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference; -extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; -extern PFN_vkCmdSetViewport vkCmdSetViewport; -extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; -extern PFN_vkCmdWaitEvents vkCmdWaitEvents; -extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; -extern PFN_vkCreateBuffer vkCreateBuffer; -extern PFN_vkCreateBufferView vkCreateBufferView; -extern PFN_vkCreateCommandPool vkCreateCommandPool; -extern PFN_vkCreateComputePipelines vkCreateComputePipelines; -extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; -extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; -extern PFN_vkCreateDevice vkCreateDevice; -extern PFN_vkCreateEvent vkCreateEvent; -extern PFN_vkCreateFence vkCreateFence; -extern PFN_vkCreateFramebuffer vkCreateFramebuffer; -extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; -extern PFN_vkCreateImage vkCreateImage; -extern PFN_vkCreateImageView vkCreateImageView; -extern PFN_vkCreateInstance vkCreateInstance; -extern PFN_vkCreatePipelineCache vkCreatePipelineCache; -extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; -extern 
PFN_vkCreateQueryPool vkCreateQueryPool; -extern PFN_vkCreateRenderPass vkCreateRenderPass; -extern PFN_vkCreateSampler vkCreateSampler; -extern PFN_vkCreateSemaphore vkCreateSemaphore; -extern PFN_vkCreateShaderModule vkCreateShaderModule; -extern PFN_vkDestroyBuffer vkDestroyBuffer; -extern PFN_vkDestroyBufferView vkDestroyBufferView; -extern PFN_vkDestroyCommandPool vkDestroyCommandPool; -extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; -extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; -extern PFN_vkDestroyDevice vkDestroyDevice; -extern PFN_vkDestroyEvent vkDestroyEvent; -extern PFN_vkDestroyFence vkDestroyFence; -extern PFN_vkDestroyFramebuffer vkDestroyFramebuffer; -extern PFN_vkDestroyImage vkDestroyImage; -extern PFN_vkDestroyImageView vkDestroyImageView; -extern PFN_vkDestroyInstance vkDestroyInstance; -extern PFN_vkDestroyPipeline vkDestroyPipeline; -extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; -extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; -extern PFN_vkDestroyQueryPool vkDestroyQueryPool; -extern PFN_vkDestroyRenderPass vkDestroyRenderPass; -extern PFN_vkDestroySampler vkDestroySampler; -extern PFN_vkDestroySemaphore vkDestroySemaphore; -extern PFN_vkDestroyShaderModule vkDestroyShaderModule; -extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; -extern PFN_vkEndCommandBuffer vkEndCommandBuffer; -extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; -extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; -extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; -extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; -extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; -extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; -extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; -extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; -extern PFN_vkFreeMemory vkFreeMemory; -extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; -extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; -extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; -extern PFN_vkGetDeviceQueue vkGetDeviceQueue; -extern PFN_vkGetEventStatus vkGetEventStatus; -extern PFN_vkGetFenceStatus vkGetFenceStatus; -extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; -extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; -extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; -extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; -extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; -extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; -extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; -extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; -extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; -extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; -extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; -extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; -extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; -extern PFN_vkMapMemory vkMapMemory; -extern 
PFN_vkMergePipelineCaches vkMergePipelineCaches; -extern PFN_vkQueueBindSparse vkQueueBindSparse; -extern PFN_vkQueueSubmit vkQueueSubmit; -extern PFN_vkQueueWaitIdle vkQueueWaitIdle; -extern PFN_vkResetCommandBuffer vkResetCommandBuffer; -extern PFN_vkResetCommandPool vkResetCommandPool; -extern PFN_vkResetDescriptorPool vkResetDescriptorPool; -extern PFN_vkResetEvent vkResetEvent; -extern PFN_vkResetFences vkResetFences; -extern PFN_vkSetEvent vkSetEvent; -extern PFN_vkUnmapMemory vkUnmapMemory; -extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; -extern PFN_vkWaitForFences vkWaitForFences; -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) -extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; -extern PFN_vkBindImageMemory2 vkBindImageMemory2; -extern PFN_vkCmdDispatchBase vkCmdDispatchBase; -extern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask; -extern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate; -extern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion; -extern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate; -extern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion; -extern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; -extern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups; -extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; -extern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport; -extern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures; -extern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2; -extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; -extern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2; -extern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties; -extern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties; -extern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties; -extern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2; -extern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2; -extern PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2; -extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; -extern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2; -extern PFN_vkTrimCommandPool vkTrimCommandPool; -extern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate; -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) -extern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2; -extern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount; -extern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount; -extern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2; -extern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2; -extern PFN_vkCreateRenderPass2 vkCreateRenderPass2; -extern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress; -extern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress; -extern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress; -extern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue; -extern 
PFN_vkResetQueryPool vkResetQueryPool; -extern PFN_vkSignalSemaphore vkSignalSemaphore; -extern PFN_vkWaitSemaphores vkWaitSemaphores; -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) -extern PFN_vkCmdBeginRendering vkCmdBeginRendering; -extern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; -extern PFN_vkCmdBlitImage2 vkCmdBlitImage2; -extern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; -extern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; -extern PFN_vkCmdCopyImage2 vkCmdCopyImage2; -extern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; -extern PFN_vkCmdEndRendering vkCmdEndRendering; -extern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; -extern PFN_vkCmdResetEvent2 vkCmdResetEvent2; -extern PFN_vkCmdResolveImage2 vkCmdResolveImage2; -extern PFN_vkCmdSetCullMode vkCmdSetCullMode; -extern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; -extern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; -extern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; -extern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; -extern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; -extern PFN_vkCmdSetEvent2 vkCmdSetEvent2; -extern PFN_vkCmdSetFrontFace vkCmdSetFrontFace; -extern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; -extern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; -extern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; -extern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; -extern PFN_vkCmdSetStencilOp vkCmdSetStencilOp; -extern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; -extern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; -extern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; -extern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; -extern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; -extern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; -extern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; -extern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; -extern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; -extern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties; -extern PFN_vkGetPrivateData vkGetPrivateData; -extern PFN_vkQueueSubmit2 vkQueueSubmit2; -extern PFN_vkSetPrivateData vkSetPrivateData; -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) -extern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX; -extern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX; -extern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX; -extern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX; -extern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX; -extern PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX; -extern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX; -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_buffer_marker) -extern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) -extern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD; -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) -extern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD; -extern PFN_vkCmdDrawIndirectCountAMD 
vkCmdDrawIndirectCountAMD; -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) -extern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD; -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) -extern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; -extern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_acquire_drm_display) -extern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT; -extern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT; -#endif /* defined(VK_EXT_acquire_drm_display) */ -#if defined(VK_EXT_acquire_xlib_display) -extern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT; -extern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT; -#endif /* defined(VK_EXT_acquire_xlib_display) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) -extern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT; -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) -extern PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT; -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) -extern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT; -extern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT; -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) -extern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT; -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) -extern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT; -extern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT; -#endif /* defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) -extern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT; -extern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT; -extern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT; -extern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT; -extern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT; -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_debug_report) -extern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; -extern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; -extern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; -#endif /* defined(VK_EXT_debug_report) */ -#if defined(VK_EXT_debug_utils) -extern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; -extern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; -extern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT; -extern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; -extern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; -extern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT; -extern PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT; -extern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT; -extern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT; -extern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT; -extern PFN_vkSubmitDebugUtilsMessageEXT 
vkSubmitDebugUtilsMessageEXT; -#endif /* defined(VK_EXT_debug_utils) */ -#if defined(VK_EXT_depth_bias_control) -extern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT; -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) -extern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT; -extern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT; -extern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT; -extern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetDescriptorEXT vkGetDescriptorEXT; -extern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT; -extern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT; -extern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) -extern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) -extern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT; -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_direct_mode_display) -extern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT; -#endif /* defined(VK_EXT_direct_mode_display) */ -#if defined(VK_EXT_directfb_surface) -extern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT; -extern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT; -#endif /* defined(VK_EXT_directfb_surface) */ -#if defined(VK_EXT_discard_rectangles) -extern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT; -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 -extern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT; -extern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT; -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ -#if defined(VK_EXT_display_control) -extern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT; -extern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT; -extern PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT; -extern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT; -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_display_surface_counter) -extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT; -#endif /* defined(VK_EXT_display_surface_counter) */ -#if defined(VK_EXT_external_memory_host) -extern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT; -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) -extern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT; -extern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT; -extern 
PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT; -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_hdr_metadata) -extern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT; -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_headless_surface) -extern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT; -#endif /* defined(VK_EXT_headless_surface) */ -#if defined(VK_EXT_host_image_copy) -extern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT; -extern PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT; -extern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT; -extern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT; -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) -extern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT; -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) -extern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT; -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) -extern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT; -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) -extern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT; -extern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT; -extern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT; -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) -extern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT; -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_metal_surface) -extern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT; -#endif /* defined(VK_EXT_metal_surface) */ -#if defined(VK_EXT_multi_draw) -extern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; -extern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) -extern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT; -extern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT; -extern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT; -extern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT; -extern PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT; -extern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT; -extern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT; -extern PFN_vkCopyMicromapEXT vkCopyMicromapEXT; -extern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT; -extern PFN_vkCreateMicromapEXT vkCreateMicromapEXT; -extern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT; -extern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT; -extern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT; -extern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT; -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) -extern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) -extern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT; -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) -extern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; -extern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; -extern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT; -extern PFN_vkSetPrivateDataEXT 
vkSetPrivateDataEXT; -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) -extern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT; -extern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT; -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) -extern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT; -extern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT; -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) -extern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT; -extern PFN_vkCreateShadersEXT vkCreateShadersEXT; -extern PFN_vkDestroyShaderEXT vkDestroyShaderEXT; -extern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT; -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) -extern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT; -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_tooling_info) -extern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT; -#endif /* defined(VK_EXT_tooling_info) */ -#if defined(VK_EXT_transform_feedback) -extern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT; -extern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT; -extern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT; -extern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT; -extern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT; -extern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT; -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) -extern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT; -extern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT; -extern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT; -extern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) -extern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; -extern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; -extern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; -extern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; -extern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) -extern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; -extern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) -extern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA; -extern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_FUCHSIA_imagepipe_surface) -extern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA; -#endif /* defined(VK_FUCHSIA_imagepipe_surface) */ -#if defined(VK_GGP_stream_descriptor_surface) -extern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP; -#endif /* defined(VK_GGP_stream_descriptor_surface) */ -#if 
defined(VK_GOOGLE_display_timing) -extern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE; -extern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE; -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) -extern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI; -extern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI; -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) -extern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI; -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) -extern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI; -extern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI; -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) -extern PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL; -extern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL; -extern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL; -extern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL; -extern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL; -extern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL; -extern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL; -extern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL; -extern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL; -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) -extern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR; -extern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR; -extern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR; -extern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR; -extern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR; -extern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR; -extern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR; -extern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR; -extern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR; -extern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR; -extern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR; -extern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR; -extern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR; -extern PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR; -extern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR; -extern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR; -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_android_surface) -extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; -#endif /* defined(VK_KHR_android_surface) */ -#if defined(VK_KHR_bind_memory2) -extern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; -extern 
PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) -extern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR; -extern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR; -extern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR; -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_cooperative_matrix) -extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR; -#endif /* defined(VK_KHR_cooperative_matrix) */ -#if defined(VK_KHR_copy_commands2) -extern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR; -extern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR; -extern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR; -extern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR; -extern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR; -extern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR; -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) -extern PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR; -extern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR; -extern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR; -extern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR; -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) -extern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR; -extern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR; -extern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR; -extern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR; -extern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR; -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) -extern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; -extern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; -extern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) -extern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR; -extern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR; -extern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR; -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_device_group_creation) -extern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR; -#endif /* defined(VK_KHR_device_group_creation) */ -#if defined(VK_KHR_display) -extern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; -extern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; -extern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; -extern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR; -extern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; -extern PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; -extern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; -#endif /* defined(VK_KHR_display) */ -#if defined(VK_KHR_display_swapchain) -extern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) -extern 
PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; -extern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) -extern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; -extern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_external_fence_capabilities) -extern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR; -#endif /* defined(VK_KHR_external_fence_capabilities) */ -#if defined(VK_KHR_external_fence_fd) -extern PFN_vkGetFenceFdKHR vkGetFenceFdKHR; -extern PFN_vkImportFenceFdKHR vkImportFenceFdKHR; -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) -extern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR; -extern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR; -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_capabilities) -extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_capabilities) */ -#if defined(VK_KHR_external_memory_fd) -extern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR; -extern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) -extern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR; -extern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR; -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_capabilities) -extern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR; -#endif /* defined(VK_KHR_external_semaphore_capabilities) */ -#if defined(VK_KHR_external_semaphore_fd) -extern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR; -extern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR; -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) -extern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR; -extern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR; -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) -extern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR; -extern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR; -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_display_properties2) -extern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR; -extern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR; -extern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR; -extern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR; -#endif /* defined(VK_KHR_get_display_properties2) */ -#if defined(VK_KHR_get_memory_requirements2) -extern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; -extern PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; -extern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_get_physical_device_properties2) -extern PFN_vkGetPhysicalDeviceFeatures2KHR 
vkGetPhysicalDeviceFeatures2KHR; -extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; -extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR; -#endif /* defined(VK_KHR_get_physical_device_properties2) */ -#if defined(VK_KHR_get_surface_capabilities2) -extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; -extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; -#endif /* defined(VK_KHR_get_surface_capabilities2) */ -#if defined(VK_KHR_maintenance1) -extern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) -extern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) -extern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; -extern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; -extern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) -extern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR; -extern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR; -extern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR; -extern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR; -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_map_memory2) -extern PFN_vkMapMemory2KHR vkMapMemory2KHR; -extern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR; -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) -extern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; -extern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; -extern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; -extern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR; -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_executable_properties) -extern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR; -extern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR; -extern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR; -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) -extern PFN_vkWaitForPresentKHR vkWaitForPresentKHR; -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) -extern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) -extern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR; -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && 
defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) -extern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR; -extern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR; -extern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR; -extern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR; -extern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR; -extern PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR; -extern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR; -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) -extern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; -extern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) -extern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR; -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_surface) -extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; -extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; -extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; -extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; -extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; -#endif /* defined(VK_KHR_surface) */ -#if defined(VK_KHR_swapchain) -extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; -extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; -extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; -extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; -extern PFN_vkQueuePresentKHR vkQueuePresentKHR; -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) -extern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR; -extern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR; -extern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR; -extern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR; -extern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR; -extern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR; -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) -extern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) -extern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) -extern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR; -extern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR; -extern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR; -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) -extern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR; -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) -extern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR; -extern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR; -extern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR 
vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR; -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) -extern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR; -extern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR; -extern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR; -extern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR; -extern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR; -extern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR; -extern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR; -extern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR; -extern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR; -extern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR; -extern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR; -extern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR; -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_KHR_wayland_surface) -extern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; -extern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; -#endif /* defined(VK_KHR_wayland_surface) */ -#if defined(VK_KHR_win32_surface) -extern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; -extern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; -#endif /* defined(VK_KHR_win32_surface) */ -#if defined(VK_KHR_xcb_surface) -extern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; -extern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR; -#endif /* defined(VK_KHR_xcb_surface) */ -#if defined(VK_KHR_xlib_surface) -extern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; -extern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; -#endif /* defined(VK_KHR_xlib_surface) */ -#if defined(VK_MVK_ios_surface) -extern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK; -#endif /* defined(VK_MVK_ios_surface) */ -#if defined(VK_MVK_macos_surface) -extern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK; -#endif /* defined(VK_MVK_macos_surface) */ -#if defined(VK_NN_vi_surface) -extern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN; -#endif /* defined(VK_NN_vi_surface) */ -#if defined(VK_NVX_binary_import) -extern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX; -extern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX; -extern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX; -extern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX; -extern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX; -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) -extern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX; -extern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX; -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_acquire_winrt_display) -extern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV; -extern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV; -#endif /* defined(VK_NV_acquire_winrt_display) */ -#if defined(VK_NV_clip_space_w_scaling) -extern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV; -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_cooperative_matrix) -extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV 
vkGetPhysicalDeviceCooperativeMatrixPropertiesNV; -#endif /* defined(VK_NV_cooperative_matrix) */ -#if defined(VK_NV_copy_memory_indirect) -extern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV; -extern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV; -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_coverage_reduction_mode) -extern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; -#endif /* defined(VK_NV_coverage_reduction_mode) */ -#if defined(VK_NV_cuda_kernel_launch) -extern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV; -extern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV; -extern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV; -extern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV; -extern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV; -extern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV; -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) -extern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV; -extern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV; -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) -extern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV; -extern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV; -extern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV; -extern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV; -extern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV; -extern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) -extern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV; -extern PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV; -extern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_capabilities) -extern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV; -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_external_memory_rdma) -extern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV; -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) -extern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) -extern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV; -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) -extern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV; -extern PFN_vkLatencySleepNV vkLatencySleepNV; -extern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV; -extern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV; -extern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV; -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) -extern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV; -extern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV; -#endif /* defined(VK_NV_memory_decompression) */ -#if 
defined(VK_NV_mesh_shader) -extern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV; -extern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV; -extern PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV; -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) -extern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV; -extern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV; -extern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV; -extern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV; -extern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV; -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) -extern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV; -extern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV; -extern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV; -extern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV; -extern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV; -extern PFN_vkCompileDeferredNV vkCompileDeferredNV; -extern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV; -extern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV; -extern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV; -extern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV; -extern PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV; -extern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV; -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 -extern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV; -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) -extern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV; -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) -extern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV; -extern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV; -extern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV; -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) -extern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM; -extern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM; -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) -extern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX; -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_QNX_screen_surface) -extern PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX; -extern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX; -#endif /* defined(VK_QNX_screen_surface) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) -extern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE; -extern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE; -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || 
(defined(VK_EXT_shader_object)) -extern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; -extern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; -extern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; -extern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; -extern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; -extern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; -extern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT; -extern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; -extern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT; -extern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; -extern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; -extern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT; -extern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT; -extern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT; -extern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT; -extern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT; -extern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT; -extern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT; -extern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT; -extern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT; -extern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT; -extern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT; -extern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT; -extern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT; -extern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT; -extern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT; -extern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT; -extern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT; -extern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT; -extern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT; -extern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT; -extern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT; -extern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT; -extern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT; -extern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT; -extern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) -extern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) -extern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) -extern PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV; -extern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) -extern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV; -extern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV; -extern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) -extern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) -extern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) -extern PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) -extern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT; -#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) -extern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT; -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) 
-extern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) -extern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -extern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR; -extern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR; -extern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -extern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -/* VOLK_GENERATE_PROTOTYPES_H */ - -#ifdef __cplusplus -} -#endif - -#endif - -#ifdef VOLK_IMPLEMENTATION -#undef VOLK_IMPLEMENTATION -/* Prevent tools like dependency checkers from detecting a cyclic dependency */ -#define VOLK_SOURCE "volk.c" -#include VOLK_SOURCE -#endif - -/** - * Copyright (c) 2018-2023 Arseny Kapoulkine - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
-*/ -/* clang-format on */ diff --git a/third_party/vulkan/vulkan.cppm b/third_party/vulkan/vulkan.cppm index 0b7fafb..60b70f0 100644 --- a/third_party/vulkan/vulkan.cppm +++ b/third_party/vulkan/vulkan.cppm @@ -62,6 +62,8 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::UniqueHandle; #endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + using VULKAN_HPP_NAMESPACE::exchange; + //================== //=== BASE TYPEs === //================== @@ -236,37 +238,48 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::ChromaLocationKHR; using VULKAN_HPP_NAMESPACE::CommandPoolTrimFlagBits; using VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags; + using VULKAN_HPP_NAMESPACE::CommandPoolTrimFlagsKHR; using VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlagBits; using VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlags; + using VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlagsKHR; using VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType; using VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateTypeKHR; using VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlagBits; using VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlags; + using VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlagsKHR; using VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits; using VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags; + using VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagsKHR; using VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagBits; using VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlags; + using VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagsKHR; using VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits; using VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags; + using VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsKHR; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlagBits; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlags; + using VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlagsKHR; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags; + using VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagsKHR; using VULKAN_HPP_NAMESPACE::FenceImportFlagBits; using VULKAN_HPP_NAMESPACE::FenceImportFlagBitsKHR; using VULKAN_HPP_NAMESPACE::FenceImportFlags; + using VULKAN_HPP_NAMESPACE::FenceImportFlagsKHR; using VULKAN_HPP_NAMESPACE::MemoryAllocateFlagBits; using VULKAN_HPP_NAMESPACE::MemoryAllocateFlagBitsKHR; using VULKAN_HPP_NAMESPACE::MemoryAllocateFlags; + using VULKAN_HPP_NAMESPACE::MemoryAllocateFlagsKHR; using VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlagBits; using VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlagBitsKHR; using VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags; + using VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlagsKHR; using VULKAN_HPP_NAMESPACE::PointClippingBehavior; using VULKAN_HPP_NAMESPACE::PointClippingBehaviorKHR; using VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion; @@ -276,6 +289,7 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::SemaphoreImportFlagBits; using VULKAN_HPP_NAMESPACE::SemaphoreImportFlagBitsKHR; using VULKAN_HPP_NAMESPACE::SemaphoreImportFlags; + using 
VULKAN_HPP_NAMESPACE::SemaphoreImportFlagsKHR; using VULKAN_HPP_NAMESPACE::SubgroupFeatureFlagBits; using VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags; using VULKAN_HPP_NAMESPACE::TessellationDomainOrigin; @@ -285,11 +299,13 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::DescriptorBindingFlagBits; using VULKAN_HPP_NAMESPACE::DescriptorBindingFlagBitsEXT; using VULKAN_HPP_NAMESPACE::DescriptorBindingFlags; + using VULKAN_HPP_NAMESPACE::DescriptorBindingFlagsEXT; using VULKAN_HPP_NAMESPACE::DriverId; using VULKAN_HPP_NAMESPACE::DriverIdKHR; using VULKAN_HPP_NAMESPACE::ResolveModeFlagBits; using VULKAN_HPP_NAMESPACE::ResolveModeFlagBitsKHR; using VULKAN_HPP_NAMESPACE::ResolveModeFlags; + using VULKAN_HPP_NAMESPACE::ResolveModeFlagsKHR; using VULKAN_HPP_NAMESPACE::SamplerReductionMode; using VULKAN_HPP_NAMESPACE::SamplerReductionModeEXT; using VULKAN_HPP_NAMESPACE::SemaphoreType; @@ -297,6 +313,7 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::SemaphoreWaitFlagBits; using VULKAN_HPP_NAMESPACE::SemaphoreWaitFlagBitsKHR; using VULKAN_HPP_NAMESPACE::SemaphoreWaitFlags; + using VULKAN_HPP_NAMESPACE::SemaphoreWaitFlagsKHR; using VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence; using VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependenceKHR; @@ -304,27 +321,35 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::AccessFlagBits2; using VULKAN_HPP_NAMESPACE::AccessFlagBits2KHR; using VULKAN_HPP_NAMESPACE::AccessFlags2; + using VULKAN_HPP_NAMESPACE::AccessFlags2KHR; using VULKAN_HPP_NAMESPACE::FormatFeatureFlagBits2; using VULKAN_HPP_NAMESPACE::FormatFeatureFlagBits2KHR; using VULKAN_HPP_NAMESPACE::FormatFeatureFlags2; + using VULKAN_HPP_NAMESPACE::FormatFeatureFlags2KHR; using VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlagBits; using VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlagBitsEXT; using VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlags; + using VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlagsEXT; using VULKAN_HPP_NAMESPACE::PipelineStageFlagBits2; using VULKAN_HPP_NAMESPACE::PipelineStageFlagBits2KHR; using VULKAN_HPP_NAMESPACE::PipelineStageFlags2; + using VULKAN_HPP_NAMESPACE::PipelineStageFlags2KHR; using VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagBits; using VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagBitsEXT; using VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlags; + using VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagsEXT; using VULKAN_HPP_NAMESPACE::RenderingFlagBits; using VULKAN_HPP_NAMESPACE::RenderingFlagBitsKHR; using VULKAN_HPP_NAMESPACE::RenderingFlags; + using VULKAN_HPP_NAMESPACE::RenderingFlagsKHR; using VULKAN_HPP_NAMESPACE::SubmitFlagBits; using VULKAN_HPP_NAMESPACE::SubmitFlagBitsKHR; using VULKAN_HPP_NAMESPACE::SubmitFlags; + using VULKAN_HPP_NAMESPACE::SubmitFlagsKHR; using VULKAN_HPP_NAMESPACE::ToolPurposeFlagBits; using VULKAN_HPP_NAMESPACE::ToolPurposeFlagBitsEXT; using VULKAN_HPP_NAMESPACE::ToolPurposeFlags; + using VULKAN_HPP_NAMESPACE::ToolPurposeFlagsEXT; //=== VK_KHR_surface === using VULKAN_HPP_NAMESPACE::ColorSpaceKHR; @@ -551,15 +576,18 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagBitsKHR; using VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagBitsNV; using VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR; + using VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsNV; using VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR; using VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR; using 
VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeNV; using VULKAN_HPP_NAMESPACE::GeometryFlagBitsKHR; using VULKAN_HPP_NAMESPACE::GeometryFlagBitsNV; using VULKAN_HPP_NAMESPACE::GeometryFlagsKHR; + using VULKAN_HPP_NAMESPACE::GeometryFlagsNV; using VULKAN_HPP_NAMESPACE::GeometryInstanceFlagBitsKHR; using VULKAN_HPP_NAMESPACE::GeometryInstanceFlagBitsNV; using VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR; + using VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsNV; using VULKAN_HPP_NAMESPACE::GeometryTypeKHR; using VULKAN_HPP_NAMESPACE::GeometryTypeNV; @@ -806,6 +834,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PipelineCreateFlagBits2KHR; using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR; + //=== VK_AMD_anti_lag === + using VULKAN_HPP_NAMESPACE::AntiLagModeAMD; + using VULKAN_HPP_NAMESPACE::AntiLagStageAMD; + //=== VK_EXT_shader_object === using VULKAN_HPP_NAMESPACE::ShaderCodeTypeEXT; using VULKAN_HPP_NAMESPACE::ShaderCreateFlagBitsEXT; @@ -844,6 +876,9 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::TimeDomainEXT; using VULKAN_HPP_NAMESPACE::TimeDomainKHR; + //=== VK_KHR_maintenance7 === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiKHR; + //========================= //=== Index Type Traits === //========================= @@ -906,11 +941,9 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::CompressionExhaustedEXTError; using VULKAN_HPP_NAMESPACE::InvalidVideoStdParametersKHRError; + using VULKAN_HPP_NAMESPACE::NotEnoughSpaceKHRError; #endif /*VULKAN_HPP_NO_EXCEPTIONS*/ - using VULKAN_HPP_NAMESPACE::createResultValueType; - using VULKAN_HPP_NAMESPACE::ignore; - using VULKAN_HPP_NAMESPACE::resultCheck; using VULKAN_HPP_NAMESPACE::ResultValue; using VULKAN_HPP_NAMESPACE::ResultValueType; @@ -2355,6 +2388,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::KHRMaintenance5ExtensionName; using VULKAN_HPP_NAMESPACE::KHRMaintenance5SpecVersion; + //=== VK_AMD_anti_lag === + using VULKAN_HPP_NAMESPACE::AMDAntiLagExtensionName; + using VULKAN_HPP_NAMESPACE::AMDAntiLagSpecVersion; + //=== VK_KHR_ray_tracing_position_fetch === using VULKAN_HPP_NAMESPACE::KHRRayTracingPositionFetchExtensionName; using VULKAN_HPP_NAMESPACE::KHRRayTracingPositionFetchSpecVersion; @@ -2363,6 +2400,11 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::EXTShaderObjectExtensionName; using VULKAN_HPP_NAMESPACE::EXTShaderObjectSpecVersion; + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_NAMESPACE::KHRPipelineBinaryExtensionName; + using VULKAN_HPP_NAMESPACE::KHRPipelineBinarySpecVersion; + using VULKAN_HPP_NAMESPACE::MaxPipelineBinaryKeySizeKHR; + //=== VK_QCOM_tile_properties === using VULKAN_HPP_NAMESPACE::QCOMTilePropertiesExtensionName; using VULKAN_HPP_NAMESPACE::QCOMTilePropertiesSpecVersion; @@ -2387,6 +2429,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::EXTMutableDescriptorTypeExtensionName; using VULKAN_HPP_NAMESPACE::EXTMutableDescriptorTypeSpecVersion; + //=== VK_EXT_legacy_vertex_attributes === + using VULKAN_HPP_NAMESPACE::EXTLegacyVertexAttributesExtensionName; + using VULKAN_HPP_NAMESPACE::EXTLegacyVertexAttributesSpecVersion; + //=== VK_EXT_layer_settings === using VULKAN_HPP_NAMESPACE::EXTLayerSettingsExtensionName; using VULKAN_HPP_NAMESPACE::EXTLayerSettingsSpecVersion; @@ -2415,6 +2461,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::QCOMMultiviewPerViewRenderAreasExtensionName; using 
VULKAN_HPP_NAMESPACE::QCOMMultiviewPerViewRenderAreasSpecVersion; + //=== VK_KHR_compute_shader_derivatives === + using VULKAN_HPP_NAMESPACE::KHRComputeShaderDerivativesExtensionName; + using VULKAN_HPP_NAMESPACE::KHRComputeShaderDerivativesSpecVersion; + //=== VK_KHR_video_decode_av1 === using VULKAN_HPP_NAMESPACE::KHRVideoDecodeAv1ExtensionName; using VULKAN_HPP_NAMESPACE::KHRVideoDecodeAv1SpecVersion; @@ -2498,14 +2548,34 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::NVRawAccessChainsExtensionName; using VULKAN_HPP_NAMESPACE::NVRawAccessChainsSpecVersion; + //=== VK_KHR_shader_relaxed_extended_instruction === + using VULKAN_HPP_NAMESPACE::KHRShaderRelaxedExtendedInstructionExtensionName; + using VULKAN_HPP_NAMESPACE::KHRShaderRelaxedExtendedInstructionSpecVersion; + + //=== VK_NV_command_buffer_inheritance === + using VULKAN_HPP_NAMESPACE::NVCommandBufferInheritanceExtensionName; + using VULKAN_HPP_NAMESPACE::NVCommandBufferInheritanceSpecVersion; + + //=== VK_KHR_maintenance7 === + using VULKAN_HPP_NAMESPACE::KHRMaintenance7ExtensionName; + using VULKAN_HPP_NAMESPACE::KHRMaintenance7SpecVersion; + //=== VK_NV_shader_atomic_float16_vector === using VULKAN_HPP_NAMESPACE::NVShaderAtomicFloat16VectorExtensionName; using VULKAN_HPP_NAMESPACE::NVShaderAtomicFloat16VectorSpecVersion; + //=== VK_EXT_shader_replicated_composites === + using VULKAN_HPP_NAMESPACE::EXTShaderReplicatedCompositesExtensionName; + using VULKAN_HPP_NAMESPACE::EXTShaderReplicatedCompositesSpecVersion; + //=== VK_NV_ray_tracing_validation === using VULKAN_HPP_NAMESPACE::NVRayTracingValidationExtensionName; using VULKAN_HPP_NAMESPACE::NVRayTracingValidationSpecVersion; + //=== VK_MESA_image_alignment_control === + using VULKAN_HPP_NAMESPACE::MESAImageAlignmentControlExtensionName; + using VULKAN_HPP_NAMESPACE::MESAImageAlignmentControlSpecVersion; + //======================== //=== CONSTEXPR VALUEs === //======================== @@ -3520,9 +3590,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PresentFrameTokenGGP; #endif /*VK_USE_PLATFORM_GGP*/ - //=== VK_NV_compute_shader_derivatives === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesNV; - //=== VK_NV_mesh_shader === using VULKAN_HPP_NAMESPACE::DrawMeshTasksIndirectCommandNV; using VULKAN_HPP_NAMESPACE::PhysicalDeviceMeshShaderFeaturesNV; @@ -4183,6 +4250,11 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::SubresourceLayout2EXT; using VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR; + //=== VK_AMD_anti_lag === + using VULKAN_HPP_NAMESPACE::AntiLagDataAMD; + using VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceAntiLagFeaturesAMD; + //=== VK_KHR_ray_tracing_position_fetch === using VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPositionFetchFeaturesKHR; @@ -4191,6 +4263,20 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderObjectPropertiesEXT; using VULKAN_HPP_NAMESPACE::ShaderCreateInfoEXT; + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR; + using 
VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR; + using VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR; + using VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR; + using VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR; + //=== VK_QCOM_tile_properties === using VULKAN_HPP_NAMESPACE::PhysicalDeviceTilePropertiesFeaturesQCOM; using VULKAN_HPP_NAMESPACE::TilePropertiesQCOM; @@ -4218,6 +4304,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PhysicalDeviceMutableDescriptorTypeFeaturesEXT; using VULKAN_HPP_NAMESPACE::PhysicalDeviceMutableDescriptorTypeFeaturesVALVE; + //=== VK_EXT_legacy_vertex_attributes === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT; + //=== VK_EXT_layer_settings === using VULKAN_HPP_NAMESPACE::LayerSettingEXT; using VULKAN_HPP_NAMESPACE::LayerSettingsCreateInfoEXT; @@ -4252,6 +4342,11 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM; using VULKAN_HPP_NAMESPACE::PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM; + //=== VK_KHR_compute_shader_derivatives === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesNV; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesPropertiesKHR; + //=== VK_KHR_video_decode_av1 === using VULKAN_HPP_NAMESPACE::VideoDecodeAV1CapabilitiesKHR; using VULKAN_HPP_NAMESPACE::VideoDecodeAV1DpbSlotInfoKHR; @@ -4346,12 +4441,33 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_NV_raw_access_chains === using VULKAN_HPP_NAMESPACE::PhysicalDeviceRawAccessChainsFeaturesNV; + //=== VK_KHR_shader_relaxed_extended_instruction === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR; + + //=== VK_NV_command_buffer_inheritance === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV; + + //=== VK_KHR_maintenance7 === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesListKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR; + //=== VK_NV_shader_atomic_float16_vector === using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV; + //=== VK_EXT_shader_replicated_composites === + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT; + //=== VK_NV_ray_tracing_validation === using VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingValidationFeaturesNV; + //=== VK_MESA_image_alignment_control === + using VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA; + //=============== //=== HANDLEs === //=============== @@ -4454,6 +4570,9 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_shader_object === using VULKAN_HPP_NAMESPACE::ShaderEXT; + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_NAMESPACE::PipelineBinaryKHR; + //====================== //=== UNIQUE HANDLEs === //====================== @@ -4551,8 +4670,11 @@ export namespace VULKAN_HPP_NAMESPACE using 
VULKAN_HPP_NAMESPACE::UniqueOpticalFlowSessionNV; //=== VK_EXT_shader_object === - using VULKAN_HPP_NAMESPACE::UniqueHandleTraits; using VULKAN_HPP_NAMESPACE::UniqueShaderEXT; + + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_NAMESPACE::UniqueHandleTraits; + using VULKAN_HPP_NAMESPACE::UniquePipelineBinaryKHR; #endif /*VULKAN_HPP_NO_SMART_HANDLE*/ //====================== @@ -4655,8 +4777,11 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::SharedOpticalFlowSessionNV; //=== VK_EXT_shader_object === - using VULKAN_HPP_NAMESPACE::SharedHandleTraits; using VULKAN_HPP_NAMESPACE::SharedShaderEXT; + + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_NAMESPACE::SharedHandleTraits; + using VULKAN_HPP_NAMESPACE::SharedPipelineBinaryKHR; #endif /*VULKAN_HPP_NO_SMART_HANDLE*/ //=========================== @@ -4675,7 +4800,7 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::StructExtends; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ -#if defined( VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL ) +#if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL using VULKAN_HPP_NAMESPACE::DynamicLoader; #endif /*VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL*/ @@ -4728,7 +4853,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_RAII_NAMESPACE::Context; using VULKAN_HPP_RAII_NAMESPACE::ContextDispatcher; using VULKAN_HPP_RAII_NAMESPACE::DeviceDispatcher; - using VULKAN_HPP_RAII_NAMESPACE::exchange; using VULKAN_HPP_RAII_NAMESPACE::InstanceDispatcher; //==================== @@ -4838,6 +4962,10 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_RAII_NAMESPACE::ShaderEXT; using VULKAN_HPP_RAII_NAMESPACE::ShaderEXTs; + //=== VK_KHR_pipeline_binary === + using VULKAN_HPP_RAII_NAMESPACE::PipelineBinaryKHR; + using VULKAN_HPP_RAII_NAMESPACE::PipelineBinaryKHRs; + } // namespace VULKAN_HPP_RAII_NAMESPACE #endif } // namespace VULKAN_HPP_NAMESPACE diff --git a/third_party/vulkan/vulkan.hpp b/third_party/vulkan/vulkan.hpp index f456a7a..d5744c1 100644 --- a/third_party/vulkan/vulkan.hpp +++ b/third_party/vulkan/vulkan.hpp @@ -12,6 +12,7 @@ #include // ArrayWrapperND #include // strnlen #include // std::string +#include // std::exchange #include #include @@ -56,7 +57,7 @@ extern "C" __declspec( dllimport ) FARPROC __stdcall GetProcAddress( HINSTANCE h # include #endif -static_assert( VK_HEADER_VERSION == 281, "Wrong VK_HEADER_VERSION!" ); +static_assert( VK_HEADER_VERSION == 295, "Wrong VK_HEADER_VERSION!" 
); // includes through some other header // this results in major(x) being resolved to gnu_dev_major(x) @@ -146,66 +147,72 @@ namespace VULKAN_HPP_NAMESPACE } #endif -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - template ::value, int>::type = 0> - std::strong_ordering operator<=>( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) <=> *static_cast const *>( &rhs ); - } -#else - template ::value, int>::type = 0> - bool operator<( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) < *static_cast const *>( &rhs ); - } - - template ::value, int>::type = 0> - bool operator<=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) <= *static_cast const *>( &rhs ); - } - - template ::value, int>::type = 0> - bool operator>( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) > *static_cast const *>( &rhs ); - } - - template ::value, int>::type = 0> - bool operator>=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) >= *static_cast const *>( &rhs ); - } -#endif - - template ::value, int>::type = 0> - bool operator==( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) == *static_cast const *>( &rhs ); - } - - template ::value, int>::type = 0> - bool operator!=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return *static_cast const *>( this ) != *static_cast const *>( &rhs ); - } - private: VULKAN_HPP_CONSTEXPR_14 void copy( char const * data, size_t len ) VULKAN_HPP_NOEXCEPT { - size_t n = std::min( N, len ); + size_t n = ( std::min )( N - 1, len ); for ( size_t i = 0; i < n; ++i ) { ( *this )[i] = data[i]; } - for ( size_t i = n; i < N; ++i ) - { - ( *this )[i] = 0; - } + ( *this )[n] = 0; } }; - // specialization of relational operators between std::string and arrays of chars +// relational operators between ArrayWrapper1D of chars with potentially different sizes +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + template + std::strong_ordering operator<=>( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + int result = strcmp( lhs.data(), rhs.data() ); + return ( result < 0 ) ? std::strong_ordering::less : ( ( result > 0 ) ? 
std::strong_ordering::greater : std::strong_ordering::equal ); + } +#else + template + bool operator<( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) < 0; + } + + template + bool operator<=( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) <= 0; + } + + template + bool operator>( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) > 0; + } + + template + bool operator>=( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) >= 0; + } +#endif + + template + bool operator==( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) == 0; + } + + template + bool operator!=( ArrayWrapper1D const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return strcmp( lhs.data(), rhs.data() ) != 0; + } + +// specialization of relational operators between std::string and arrays of chars +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + template + std::strong_ordering operator<=>( std::string const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT + { + return lhs <=> rhs.data(); + } +#else template bool operator<( std::string const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT { @@ -229,6 +236,7 @@ namespace VULKAN_HPP_NAMESPACE { return lhs >= rhs.data(); } +#endif template bool operator==( std::string const & lhs, ArrayWrapper1D const & rhs ) VULKAN_HPP_NOEXCEPT @@ -381,38 +389,19 @@ namespace VULKAN_HPP_NAMESPACE { } - ArrayProxyNoTemporaries( T & value ) VULKAN_HPP_NOEXCEPT + template ::value && std::is_lvalue_reference::value, int>::type = 0> + ArrayProxyNoTemporaries( B && value ) VULKAN_HPP_NOEXCEPT : m_count( 1 ) , m_ptr( &value ) { } - template - ArrayProxyNoTemporaries( V && value ) = delete; - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( typename std::remove_const::type & value ) VULKAN_HPP_NOEXCEPT - : m_count( 1 ) - , m_ptr( &value ) - { - } - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( typename std::remove_const::type && value ) = delete; - ArrayProxyNoTemporaries( uint32_t count, T * ptr ) VULKAN_HPP_NOEXCEPT : m_count( count ) , m_ptr( ptr ) { } - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( uint32_t count, typename std::remove_const::type * ptr ) VULKAN_HPP_NOEXCEPT - : m_count( count ) - , m_ptr( ptr ) - { - } - template ArrayProxyNoTemporaries( T ( &ptr )[C] ) VULKAN_HPP_NOEXCEPT : m_count( C ) @@ -423,62 +412,29 @@ namespace VULKAN_HPP_NAMESPACE template ArrayProxyNoTemporaries( T ( &&ptr )[C] ) = delete; - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( typename std::remove_const::type ( &ptr )[C] ) VULKAN_HPP_NOEXCEPT - : m_count( C ) - , m_ptr( ptr ) - { - } - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( typename std::remove_const::type ( &&ptr )[C] ) = delete; - - ArrayProxyNoTemporaries( std::initializer_list const & list ) VULKAN_HPP_NOEXCEPT - : m_count( static_cast( list.size() ) ) - , m_ptr( list.begin() ) - { - } - - ArrayProxyNoTemporaries( std::initializer_list const && list ) = delete; - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( std::initializer_list::type> const & list ) VULKAN_HPP_NOEXCEPT - : m_count( static_cast( list.size() ) ) - , m_ptr( list.begin() ) - { - } - - template ::value, 
int>::type = 0> - ArrayProxyNoTemporaries( std::initializer_list::type> const && list ) = delete; - - ArrayProxyNoTemporaries( std::initializer_list & list ) VULKAN_HPP_NOEXCEPT - : m_count( static_cast( list.size() ) ) - , m_ptr( list.begin() ) - { - } - - ArrayProxyNoTemporaries( std::initializer_list && list ) = delete; - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( std::initializer_list::type> & list ) VULKAN_HPP_NOEXCEPT - : m_count( static_cast( list.size() ) ) - , m_ptr( list.begin() ) - { - } - - template ::value, int>::type = 0> - ArrayProxyNoTemporaries( std::initializer_list::type> && list ) = delete; - - // Any type with a .data() return type implicitly convertible to T*, and a .size() return type implicitly convertible to size_t. + // Any l-value reference with a .data() return type implicitly convertible to T*, and a .size() return type implicitly convertible to size_t. template ().data() ), T *>::value && - std::is_convertible().size() ), std::size_t>::value>::type * = nullptr> - ArrayProxyNoTemporaries( V & v ) VULKAN_HPP_NOEXCEPT + typename std::enable_if().begin() ), T *>::value && + std::is_convertible().data() ), T *>::value && + std::is_convertible().size() ), std::size_t>::value && std::is_lvalue_reference::value, + int>::type = 0> + ArrayProxyNoTemporaries( V && v ) VULKAN_HPP_NOEXCEPT : m_count( static_cast( v.size() ) ) , m_ptr( v.data() ) { } + // Any l-value reference with a .begin() return type implicitly convertible to T*, and a .size() return type implicitly convertible to size_t. + template ().begin() ), T *>::value && + std::is_convertible().size() ), std::size_t>::value && std::is_lvalue_reference::value, + int>::type = 0> + ArrayProxyNoTemporaries( V && v ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( v.size() ) ) + , m_ptr( v.begin() ) + { + } + const T * begin() const VULKAN_HPP_NOEXCEPT { return m_ptr; @@ -652,6 +608,8 @@ namespace VULKAN_HPP_NAMESPACE template class StructureChain : public std::tuple { + // Note: StructureChain has no move constructor or move assignment operator, as it is not supposed to contain movable containers. + // In order to get a copy-operation on a move-operations, those functions are neither deleted nor defaulted. public: StructureChain() VULKAN_HPP_NOEXCEPT { @@ -668,15 +626,6 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &std::get<0>( rhs ) ) ); } - StructureChain( StructureChain && rhs ) VULKAN_HPP_NOEXCEPT : std::tuple( std::forward>( rhs ) ) - { - static_assert( StructureChainValidation::valid, "The structure chain is not valid!" ); - link( &std::get<0>( *this ), - &std::get<0>( rhs ), - reinterpret_cast( &std::get<0>( *this ) ), - reinterpret_cast( &std::get<0>( rhs ) ) ); - } - StructureChain( ChainElements const &... elems ) VULKAN_HPP_NOEXCEPT : std::tuple( elems... ) { static_assert( StructureChainValidation::valid, "The structure chain is not valid!" 
); @@ -693,8 +642,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } - StructureChain & operator=( StructureChain && rhs ) = delete; - template >::type, size_t Which = 0> T & get() VULKAN_HPP_NOEXCEPT { @@ -4395,9 +4342,9 @@ namespace VULKAN_HPP_NAMESPACE } void vkCmdSetRenderingInputAttachmentIndicesKHR( VkCommandBuffer commandBuffer, - const VkRenderingInputAttachmentIndexInfoKHR * pLocationInfo ) const VULKAN_HPP_NOEXCEPT + const VkRenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT { - return ::vkCmdSetRenderingInputAttachmentIndicesKHR( commandBuffer, pLocationInfo ); + return ::vkCmdSetRenderingInputAttachmentIndicesKHR( commandBuffer, pInputAttachmentIndexInfo ); } //=== VK_EXT_buffer_device_address === @@ -5759,6 +5706,13 @@ namespace VULKAN_HPP_NAMESPACE return ::vkGetImageSubresourceLayout2KHR( device, image, pSubresource, pLayout ); } + //=== VK_AMD_anti_lag === + + void vkAntiLagUpdateAMD( VkDevice device, const VkAntiLagDataAMD * pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAntiLagUpdateAMD( device, pData ); + } + //=== VK_EXT_shader_object === VkResult vkCreateShadersEXT( VkDevice device, @@ -5788,6 +5742,44 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdBindShadersEXT( commandBuffer, stageCount, pStages, pShaders ); } + //=== VK_KHR_pipeline_binary === + + VkResult vkCreatePipelineBinariesKHR( VkDevice device, + const VkPipelineBinaryCreateInfoKHR * pCreateInfo, + const VkAllocationCallbacks * pAllocator, + VkPipelineBinaryHandlesInfoKHR * pBinaries ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreatePipelineBinariesKHR( device, pCreateInfo, pAllocator, pBinaries ); + } + + void vkDestroyPipelineBinaryKHR( VkDevice device, VkPipelineBinaryKHR pipelineBinary, const VkAllocationCallbacks * pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyPipelineBinaryKHR( device, pipelineBinary, pAllocator ); + } + + VkResult vkGetPipelineKeyKHR( VkDevice device, + const VkPipelineCreateInfoKHR * pPipelineCreateInfo, + VkPipelineBinaryKeyKHR * pPipelineKey ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineKeyKHR( device, pPipelineCreateInfo, pPipelineKey ); + } + + VkResult vkGetPipelineBinaryDataKHR( VkDevice device, + const VkPipelineBinaryDataInfoKHR * pInfo, + VkPipelineBinaryKeyKHR * pPipelineBinaryKey, + size_t * pPipelineBinaryDataSize, + void * pPipelineBinaryData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineBinaryDataKHR( device, pInfo, pPipelineBinaryKey, pPipelineBinaryDataSize, pPipelineBinaryData ); + } + + VkResult vkReleaseCapturedPipelineDataKHR( VkDevice device, + const VkReleaseCapturedPipelineDataInfoKHR * pInfo, + const VkAllocationCallbacks * pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkReleaseCapturedPipelineDataKHR( device, pInfo, pAllocator ); + } + //=== VK_QCOM_tile_properties === VkResult vkGetFramebufferTilePropertiesQCOM( VkDevice device, @@ -5927,6 +5919,18 @@ namespace VULKAN_HPP_NAMESPACE } #endif +#if ( 14 <= VULKAN_HPP_CPP_VERSION ) + using std::exchange; +#else + template + VULKAN_HPP_CONSTEXPR_14 VULKAN_HPP_INLINE T exchange( T & obj, U && newValue ) + { + T oldValue = std::move( obj ); + obj = std::forward( newValue ); + return oldValue; + } +#endif + #if !defined( VULKAN_HPP_NO_SMART_HANDLE ) struct AllocationCallbacks; @@ -6144,7 +6148,7 @@ namespace VULKAN_HPP_NAMESPACE using RemoteAddressNV = void *; using SampleMask = uint32_t; - template + template struct CppType { }; @@ -6553,6 +6557,14 @@ namespace VULKAN_HPP_NAMESPACE CompressionExhaustedEXTError( char const 
* message ) : SystemError( make_error_code( Result::eErrorCompressionExhaustedEXT ), message ) {} }; + class NotEnoughSpaceKHRError : public SystemError + { + public: + NotEnoughSpaceKHRError( std::string const & message ) : SystemError( make_error_code( Result::eErrorNotEnoughSpaceKHR ), message ) {} + + NotEnoughSpaceKHRError( char const * message ) : SystemError( make_error_code( Result::eErrorNotEnoughSpaceKHR ), message ) {} + }; + namespace detail { [[noreturn]] VULKAN_HPP_INLINE void throwResultException( Result result, char const * message ) @@ -6595,17 +6607,13 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VK_USE_PLATFORM_WIN32_KHR*/ case Result::eErrorInvalidVideoStdParametersKHR: throw InvalidVideoStdParametersKHRError( message ); case Result::eErrorCompressionExhaustedEXT: throw CompressionExhaustedEXTError( message ); + case Result::eErrorNotEnoughSpaceKHR: throw NotEnoughSpaceKHRError( message ); default: throw SystemError( make_error_code( result ), message ); } } } // namespace detail #endif - template - void ignore( T const & ) VULKAN_HPP_NOEXCEPT - { - } - template struct ResultValue { @@ -6718,65 +6726,76 @@ namespace VULKAN_HPP_NAMESPACE #endif }; - VULKAN_HPP_INLINE typename ResultValueType::type createResultValueType( Result result ) + namespace detail { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - return result; -#else - ignore( result ); -#endif - } - - template - VULKAN_HPP_INLINE typename ResultValueType::type createResultValueType( Result result, T & data ) - { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - return ResultValue( result, data ); -#else - ignore( result ); - return data; -#endif - } - - template - VULKAN_HPP_INLINE typename ResultValueType::type createResultValueType( Result result, T && data ) - { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - return ResultValue( result, std::move( data ) ); -#else - ignore( result ); - return std::move( data ); -#endif - } - - VULKAN_HPP_INLINE void resultCheck( Result result, char const * message ) - { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - ignore( result ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty - ignore( message ); - VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); -#else - if ( result != Result::eSuccess ) + template + void ignore( T const & ) VULKAN_HPP_NOEXCEPT { - detail::throwResultException( result, message ); } -#endif - } - VULKAN_HPP_INLINE void resultCheck( Result result, char const * message, std::initializer_list successCodes ) - { -#ifdef VULKAN_HPP_NO_EXCEPTIONS - ignore( result ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty - ignore( message ); - ignore( successCodes ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty - VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); -#else - if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createResultValueType( VULKAN_HPP_NAMESPACE::Result result ) { - detail::throwResultException( result, message ); - } +#ifdef VULKAN_HPP_NO_EXCEPTIONS + return result; +#else + VULKAN_HPP_NAMESPACE::detail::ignore( result ); #endif - } + } + + template + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createResultValueType( VULKAN_HPP_NAMESPACE::Result result, T & data ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + return ResultValue( result, data ); +#else + VULKAN_HPP_NAMESPACE::detail::ignore( result ); + return data; +#endif + } + + template + VULKAN_HPP_INLINE typename 
VULKAN_HPP_NAMESPACE::ResultValueType::type createResultValueType( VULKAN_HPP_NAMESPACE::Result result, T && data ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + return ResultValue( result, std::move( data ) ); +#else + VULKAN_HPP_NAMESPACE::detail::ignore( result ); + return std::move( data ); +#endif + } + } // namespace detail + + namespace detail + { + VULKAN_HPP_INLINE void resultCheck( Result result, char const * message ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_NAMESPACE::detail::ignore( result ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_NAMESPACE::detail::ignore( message ); + VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); +#else + if ( result != Result::eSuccess ) + { + VULKAN_HPP_NAMESPACE::detail::throwResultException( result, message ); + } +#endif + } + + VULKAN_HPP_INLINE void resultCheck( Result result, char const * message, std::initializer_list successCodes ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_NAMESPACE::detail::ignore( result ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_NAMESPACE::detail::ignore( message ); + VULKAN_HPP_NAMESPACE::detail::ignore( successCodes ); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +#else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + VULKAN_HPP_NAMESPACE::detail::throwResultException( result, message ); + } +#endif + } + } // namespace detail //=========================== //=== CONSTEXPR CONSTANTs === @@ -6847,6 +6866,9 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_shader_module_identifier === VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxShaderModuleIdentifierSizeEXT = VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT; + //=== VK_KHR_pipeline_binary === + VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxPipelineBinaryKeySizeKHR = VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR; + //=== VK_KHR_video_decode_av1 === VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxVideoAv1ReferencesPerFrameKHR = VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR; @@ -6993,10 +7015,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_INLINE auto EXTDepthRangeUnrestrictedSpecVersion = VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION; //=== VK_KHR_sampler_mirror_clamp_to_edge === - VULKAN_HPP_DEPRECATED( "The VK_KHR_sampler_mirror_clamp_to_edge extension has been promoted to core in version 1.2." ) VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerMirrorClampToEdgeExtensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME; - VULKAN_HPP_DEPRECATED( "The VK_KHR_sampler_mirror_clamp_to_edge extension has been promoted to core in version 1.2." ) - VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerMirrorClampToEdgeSpecVersion = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION; + VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerMirrorClampToEdgeSpecVersion = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION; //=== VK_IMG_filter_cubic === VULKAN_HPP_CONSTEXPR_INLINE auto IMGFilterCubicExtensionName = VK_IMG_FILTER_CUBIC_EXTENSION_NAME; @@ -7015,10 +7035,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_INLINE auto AMDShaderExplicitVertexParameterSpecVersion = VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION; //=== VK_EXT_debug_marker === - VULKAN_HPP_DEPRECATED( "The VK_EXT_debug_marker extension has been promoted to VK_EXT_debug_utils." 
 )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDebugMarkerExtensionName = VK_EXT_DEBUG_MARKER_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_debug_marker extension has been promoted to VK_EXT_debug_utils." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTDebugMarkerSpecVersion = VK_EXT_DEBUG_MARKER_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTDebugMarkerSpecVersion = VK_EXT_DEBUG_MARKER_SPEC_VERSION;

 //=== VK_KHR_video_queue ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVideoQueueExtensionName = VK_KHR_VIDEO_QUEUE_EXTENSION_NAME;
@@ -7051,10 +7069,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVXImageViewHandleSpecVersion = VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION;

 //=== VK_AMD_draw_indirect_count ===
- VULKAN_HPP_DEPRECATED( "The VK_AMD_draw_indirect_count extension has been promoted to VK_KHR_draw_indirect_count." )
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDDrawIndirectCountExtensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_AMD_draw_indirect_count extension has been promoted to VK_KHR_draw_indirect_count." )
- VULKAN_HPP_CONSTEXPR_INLINE auto AMDDrawIndirectCountSpecVersion = VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto AMDDrawIndirectCountSpecVersion = VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION;

 //=== VK_AMD_negative_viewport_height ===
 VULKAN_HPP_DEPRECATED( "The VK_AMD_negative_viewport_height extension has been obsoleted by VK_KHR_maintenance1." )
@@ -7093,10 +7109,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDShaderInfoSpecVersion = VK_AMD_SHADER_INFO_SPEC_VERSION;

 //=== VK_KHR_dynamic_rendering ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_dynamic_rendering extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDynamicRenderingExtensionName = VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_dynamic_rendering extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDynamicRenderingSpecVersion = VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDynamicRenderingSpecVersion = VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION;

 //=== VK_AMD_shader_image_load_store_lod ===
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDShaderImageLoadStoreLodExtensionName = VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME;
@@ -7113,10 +7127,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVCornerSampledImageSpecVersion = VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION;

 //=== VK_KHR_multiview ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_multiview extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMultiviewExtensionName = VK_KHR_MULTIVIEW_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_multiview extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRMultiviewSpecVersion = VK_KHR_MULTIVIEW_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMultiviewSpecVersion = VK_KHR_MULTIVIEW_SPEC_VERSION;

 //=== VK_IMG_format_pvrtc ===
 VULKAN_HPP_DEPRECATED( "The VK_IMG_format_pvrtc extension has been deprecated." )
@@ -7146,23 +7158,17 @@ namespace VULKAN_HPP_NAMESPACE
 #if defined( VK_USE_PLATFORM_WIN32_KHR )
 //=== VK_NV_win32_keyed_mutex ===
- VULKAN_HPP_DEPRECATED( "The VK_NV_win32_keyed_mutex extension has been promoted to VK_KHR_win32_keyed_mutex." )
 VULKAN_HPP_CONSTEXPR_INLINE auto NVWin32KeyedMutexExtensionName = VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_NV_win32_keyed_mutex extension has been promoted to VK_KHR_win32_keyed_mutex." )
- VULKAN_HPP_CONSTEXPR_INLINE auto NVWin32KeyedMutexSpecVersion = VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto NVWin32KeyedMutexSpecVersion = VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION;
 #endif /*VK_USE_PLATFORM_WIN32_KHR*/

 //=== VK_KHR_get_physical_device_properties2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_get_physical_device_properties2 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetPhysicalDeviceProperties2ExtensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_get_physical_device_properties2 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetPhysicalDeviceProperties2SpecVersion = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetPhysicalDeviceProperties2SpecVersion = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION;

 //=== VK_KHR_device_group ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_device_group extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupExtensionName = VK_KHR_DEVICE_GROUP_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_device_group extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupSpecVersion = VK_KHR_DEVICE_GROUP_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupSpecVersion = VK_KHR_DEVICE_GROUP_SPEC_VERSION;

 //=== VK_EXT_validation_flags ===
 VULKAN_HPP_DEPRECATED( "The VK_EXT_validation_flags extension has been deprecated by VK_EXT_layer_settings." )
@@ -7177,10 +7183,8 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_VI_NN*/

 //=== VK_KHR_shader_draw_parameters ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_draw_parameters extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderDrawParametersExtensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_draw_parameters extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderDrawParametersSpecVersion = VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderDrawParametersSpecVersion = VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION;

 //=== VK_EXT_shader_subgroup_ballot ===
 VULKAN_HPP_DEPRECATED( "The VK_EXT_shader_subgroup_ballot extension has been deprecated by VK_VERSION_1_2." )
@@ -7195,10 +7199,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderSubgroupVoteSpecVersion = VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION;

 //=== VK_EXT_texture_compression_astc_hdr ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_texture_compression_astc_hdr extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTTextureCompressionAstcHdrExtensionName = VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_texture_compression_astc_hdr extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTTextureCompressionAstcHdrSpecVersion = VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTTextureCompressionAstcHdrSpecVersion = VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION;

 //=== VK_EXT_astc_decode_mode ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTAstcDecodeModeExtensionName = VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME;
@@ -7209,28 +7211,20 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineRobustnessSpecVersion = VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION;

 //=== VK_KHR_maintenance1 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance1 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance1ExtensionName = VK_KHR_MAINTENANCE_1_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance1 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance1SpecVersion = VK_KHR_MAINTENANCE_1_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance1SpecVersion = VK_KHR_MAINTENANCE_1_SPEC_VERSION;

 //=== VK_KHR_device_group_creation ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_device_group_creation extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupCreationExtensionName = VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_device_group_creation extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupCreationSpecVersion = VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeviceGroupCreationSpecVersion = VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION;

 //=== VK_KHR_external_memory_capabilities ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_memory_capabilities extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemoryCapabilitiesExtensionName = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_memory_capabilities extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemoryCapabilitiesSpecVersion = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemoryCapabilitiesSpecVersion = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION;

 //=== VK_KHR_external_memory ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_memory extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemoryExtensionName = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_memory extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemorySpecVersion = VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalMemorySpecVersion = VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_WIN32_KHR )
 //=== VK_KHR_external_memory_win32 ===
@@ -7249,16 +7243,12 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_WIN32_KHR*/

 //=== VK_KHR_external_semaphore_capabilities ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_semaphore_capabilities extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreCapabilitiesExtensionName = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_semaphore_capabilities extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreCapabilitiesSpecVersion = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreCapabilitiesSpecVersion = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION;

 //=== VK_KHR_external_semaphore ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_semaphore extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreExtensionName = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_semaphore extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreSpecVersion = VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalSemaphoreSpecVersion = VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_WIN32_KHR )
 //=== VK_KHR_external_semaphore_win32 ===
@@ -7279,26 +7269,20 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTConditionalRenderingSpecVersion = VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION;

 //=== VK_KHR_shader_float16_int8 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_float16_int8 extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloat16Int8ExtensionName = VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_float16_int8 extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloat16Int8SpecVersion = VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloat16Int8SpecVersion = VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION;

 //=== VK_KHR_16bit_storage ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_16bit_storage extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHR16BitStorageExtensionName = VK_KHR_16BIT_STORAGE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_16bit_storage extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHR16BitStorageSpecVersion = VK_KHR_16BIT_STORAGE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHR16BitStorageSpecVersion = VK_KHR_16BIT_STORAGE_SPEC_VERSION;

 //=== VK_KHR_incremental_present ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRIncrementalPresentExtensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRIncrementalPresentSpecVersion = VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION;

 //=== VK_KHR_descriptor_update_template ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_descriptor_update_template extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDescriptorUpdateTemplateExtensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_descriptor_update_template extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDescriptorUpdateTemplateSpecVersion = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDescriptorUpdateTemplateSpecVersion = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION;

 //=== VK_NV_clip_space_w_scaling ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVClipSpaceWScalingExtensionName = VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME;
@@ -7367,16 +7351,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTHdrMetadataSpecVersion = VK_EXT_HDR_METADATA_SPEC_VERSION;

 //=== VK_KHR_imageless_framebuffer ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_imageless_framebuffer extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRImagelessFramebufferExtensionName = VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_imageless_framebuffer extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRImagelessFramebufferSpecVersion = VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRImagelessFramebufferSpecVersion = VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION;

 //=== VK_KHR_create_renderpass2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_create_renderpass2 extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRCreateRenderpass2ExtensionName = VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_create_renderpass2 extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRCreateRenderpass2SpecVersion = VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRCreateRenderpass2SpecVersion = VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION;

 //=== VK_IMG_relaxed_line_rasterization ===
 VULKAN_HPP_CONSTEXPR_INLINE auto IMGRelaxedLineRasterizationExtensionName = VK_IMG_RELAXED_LINE_RASTERIZATION_EXTENSION_NAME;
@@ -7387,16 +7367,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSharedPresentableImageSpecVersion = VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION;

 //=== VK_KHR_external_fence_capabilities ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_fence_capabilities extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceCapabilitiesExtensionName = VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_fence_capabilities extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceCapabilitiesSpecVersion = VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceCapabilitiesSpecVersion = VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION;

 //=== VK_KHR_external_fence ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_fence extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceExtensionName = VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_external_fence extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceSpecVersion = VK_KHR_EXTERNAL_FENCE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRExternalFenceSpecVersion = VK_KHR_EXTERNAL_FENCE_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_WIN32_KHR )
 //=== VK_KHR_external_fence_win32 ===
@@ -7413,20 +7389,16 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRPerformanceQuerySpecVersion = VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION;

 //=== VK_KHR_maintenance2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance2 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance2ExtensionName = VK_KHR_MAINTENANCE_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance2 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance2SpecVersion = VK_KHR_MAINTENANCE_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance2SpecVersion = VK_KHR_MAINTENANCE_2_SPEC_VERSION;

 //=== VK_KHR_get_surface_capabilities2 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetSurfaceCapabilities2ExtensionName = VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetSurfaceCapabilities2SpecVersion = VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION;

 //=== VK_KHR_variable_pointers ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_variable_pointers extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVariablePointersExtensionName = VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_variable_pointers extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRVariablePointersSpecVersion = VK_KHR_VARIABLE_POINTERS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRVariablePointersSpecVersion = VK_KHR_VARIABLE_POINTERS_SPEC_VERSION;

 //=== VK_KHR_get_display_properties2 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetDisplayProperties2ExtensionName = VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME;
@@ -7457,10 +7429,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTQueueFamilyForeignSpecVersion = VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION;

 //=== VK_KHR_dedicated_allocation ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_dedicated_allocation extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDedicatedAllocationExtensionName = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_dedicated_allocation extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDedicatedAllocationSpecVersion = VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDedicatedAllocationSpecVersion = VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION;

 //=== VK_EXT_debug_utils ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDebugUtilsExtensionName = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
@@ -7473,16 +7443,12 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_ANDROID_KHR*/

 //=== VK_EXT_sampler_filter_minmax ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_sampler_filter_minmax extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTSamplerFilterMinmaxExtensionName = VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_sampler_filter_minmax extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTSamplerFilterMinmaxSpecVersion = VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTSamplerFilterMinmaxSpecVersion = VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION;

 //=== VK_KHR_storage_buffer_storage_class ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_storage_buffer_storage_class extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRStorageBufferStorageClassExtensionName = VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_storage_buffer_storage_class extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRStorageBufferStorageClassSpecVersion = VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRStorageBufferStorageClassSpecVersion = VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION;

 //=== VK_AMD_gpu_shader_int16 ===
 VULKAN_HPP_DEPRECATED( "The VK_AMD_gpu_shader_int16 extension has been deprecated by VK_KHR_shader_float16_int8." )
@@ -7505,10 +7471,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDShaderFragmentMaskSpecVersion = VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION;

 //=== VK_EXT_inline_uniform_block ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_inline_uniform_block extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTInlineUniformBlockExtensionName = VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_inline_uniform_block extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTInlineUniformBlockSpecVersion = VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTInlineUniformBlockSpecVersion = VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION;

 //=== VK_EXT_shader_stencil_export ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderStencilExportExtensionName = VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME;
@@ -7519,22 +7483,16 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTSampleLocationsSpecVersion = VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION;

 //=== VK_KHR_relaxed_block_layout ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_relaxed_block_layout extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRRelaxedBlockLayoutExtensionName = VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_relaxed_block_layout extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRRelaxedBlockLayoutSpecVersion = VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRRelaxedBlockLayoutSpecVersion = VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION;

 //=== VK_KHR_get_memory_requirements2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_get_memory_requirements2 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetMemoryRequirements2ExtensionName = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_get_memory_requirements2 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetMemoryRequirements2SpecVersion = VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRGetMemoryRequirements2SpecVersion = VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION;

 //=== VK_KHR_image_format_list ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_image_format_list extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRImageFormatListExtensionName = VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_image_format_list extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRImageFormatListSpecVersion = VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRImageFormatListSpecVersion = VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION;

 //=== VK_EXT_blend_operation_advanced ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTBlendOperationAdvancedExtensionName = VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME;
@@ -7573,16 +7531,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPostDepthCoverageSpecVersion = VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION;

 //=== VK_KHR_sampler_ycbcr_conversion ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_sampler_ycbcr_conversion extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerYcbcrConversionExtensionName = VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_sampler_ycbcr_conversion extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerYcbcrConversionSpecVersion = VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRSamplerYcbcrConversionSpecVersion = VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION;

 //=== VK_KHR_bind_memory2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_bind_memory2 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRBindMemory2ExtensionName = VK_KHR_BIND_MEMORY_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_bind_memory2 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRBindMemory2SpecVersion = VK_KHR_BIND_MEMORY_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRBindMemory2SpecVersion = VK_KHR_BIND_MEMORY_2_SPEC_VERSION;

 //=== VK_EXT_image_drm_format_modifier ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageDrmFormatModifierExtensionName = VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;
@@ -7593,16 +7547,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTValidationCacheSpecVersion = VK_EXT_VALIDATION_CACHE_SPEC_VERSION;

 //=== VK_EXT_descriptor_indexing ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_descriptor_indexing extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDescriptorIndexingExtensionName = VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_descriptor_indexing extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTDescriptorIndexingSpecVersion = VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTDescriptorIndexingSpecVersion = VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION;

 //=== VK_EXT_shader_viewport_index_layer ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_shader_viewport_index_layer extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderViewportIndexLayerExtensionName = VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_shader_viewport_index_layer extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderViewportIndexLayerSpecVersion = VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderViewportIndexLayerSpecVersion = VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION;

 #if defined( VK_ENABLE_BETA_EXTENSIONS )
 //=== VK_KHR_portability_subset ===
@@ -7615,24 +7565,22 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShadingRateImageSpecVersion = VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION;

 //=== VK_NV_ray_tracing ===
+ VULKAN_HPP_DEPRECATED( "The VK_NV_ray_tracing extension has been deprecated by VK_KHR_ray_tracing_pipeline." )
 VULKAN_HPP_CONSTEXPR_INLINE auto NVRayTracingExtensionName = VK_NV_RAY_TRACING_EXTENSION_NAME;
- VULKAN_HPP_CONSTEXPR_INLINE auto NVRayTracingSpecVersion = VK_NV_RAY_TRACING_SPEC_VERSION;
+ VULKAN_HPP_DEPRECATED( "The VK_NV_ray_tracing extension has been deprecated by VK_KHR_ray_tracing_pipeline." )
+ VULKAN_HPP_CONSTEXPR_INLINE auto NVRayTracingSpecVersion = VK_NV_RAY_TRACING_SPEC_VERSION;

 //=== VK_NV_representative_fragment_test ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVRepresentativeFragmentTestExtensionName = VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto NVRepresentativeFragmentTestSpecVersion = VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION;

 //=== VK_KHR_maintenance3 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance3 extension has been promoted to core in version 1.1." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance3ExtensionName = VK_KHR_MAINTENANCE_3_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance3 extension has been promoted to core in version 1.1." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance3SpecVersion = VK_KHR_MAINTENANCE_3_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance3SpecVersion = VK_KHR_MAINTENANCE_3_SPEC_VERSION;

 //=== VK_KHR_draw_indirect_count ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_draw_indirect_count extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDrawIndirectCountExtensionName = VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_draw_indirect_count extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDrawIndirectCountSpecVersion = VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDrawIndirectCountSpecVersion = VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION;

 //=== VK_EXT_filter_cubic ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTFilterCubicExtensionName = VK_EXT_FILTER_CUBIC_EXTENSION_NAME;
@@ -7643,22 +7591,16 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMRenderPassShaderResolveSpecVersion = VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION;

 //=== VK_EXT_global_priority ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_global_priority extension has been promoted to VK_KHR_global_priority." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPriorityExtensionName = VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_global_priority extension has been promoted to VK_KHR_global_priority." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPrioritySpecVersion = VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPrioritySpecVersion = VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION;

 //=== VK_KHR_shader_subgroup_extended_types ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_subgroup_extended_types extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderSubgroupExtendedTypesExtensionName = VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_subgroup_extended_types extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderSubgroupExtendedTypesSpecVersion = VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderSubgroupExtendedTypesSpecVersion = VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION;

 //=== VK_KHR_8bit_storage ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_8bit_storage extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHR8BitStorageExtensionName = VK_KHR_8BIT_STORAGE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_8bit_storage extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHR8BitStorageSpecVersion = VK_KHR_8BIT_STORAGE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHR8BitStorageSpecVersion = VK_KHR_8BIT_STORAGE_SPEC_VERSION;

 //=== VK_EXT_external_memory_host ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTExternalMemoryHostExtensionName = VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME;
@@ -7669,10 +7611,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDBufferMarkerSpecVersion = VK_AMD_BUFFER_MARKER_SPEC_VERSION;

 //=== VK_KHR_shader_atomic_int64 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_atomic_int64 extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderAtomicInt64ExtensionName = VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_atomic_int64 extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderAtomicInt64SpecVersion = VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderAtomicInt64SpecVersion = VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION;

 //=== VK_KHR_shader_clock ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderClockExtensionName = VK_KHR_SHADER_CLOCK_EXTENSION_NAME;
@@ -7683,10 +7623,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDPipelineCompilerControlSpecVersion = VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION;

 //=== VK_EXT_calibrated_timestamps ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_calibrated_timestamps extension has been promoted to VK_KHR_calibrated_timestamps." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTCalibratedTimestampsExtensionName = VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_calibrated_timestamps extension has been promoted to VK_KHR_calibrated_timestamps." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTCalibratedTimestampsSpecVersion = VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTCalibratedTimestampsSpecVersion = VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION;

 //=== VK_AMD_shader_core_properties ===
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDShaderCorePropertiesExtensionName = VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME;
@@ -7705,10 +7643,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto AMDMemoryOverallocationBehaviorSpecVersion = VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION;

 //=== VK_EXT_vertex_attribute_divisor ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_vertex_attribute_divisor extension has been promoted to VK_KHR_vertex_attribute_divisor." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTVertexAttributeDivisorExtensionName = VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_vertex_attribute_divisor extension has been promoted to VK_KHR_vertex_attribute_divisor." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTVertexAttributeDivisorSpecVersion = VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTVertexAttributeDivisorSpecVersion = VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_GGP )
 //=== VK_GGP_frame_token ===
@@ -7717,32 +7653,24 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_GGP*/

 //=== VK_EXT_pipeline_creation_feedback ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_pipeline_creation_feedback extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationFeedbackExtensionName = VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_pipeline_creation_feedback extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationFeedbackSpecVersion = VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationFeedbackSpecVersion = VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION;

 //=== VK_KHR_driver_properties ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_driver_properties extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDriverPropertiesExtensionName = VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_driver_properties extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDriverPropertiesSpecVersion = VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDriverPropertiesSpecVersion = VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION;

 //=== VK_KHR_shader_float_controls ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_float_controls extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloatControlsExtensionName = VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_float_controls extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloatControlsSpecVersion = VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderFloatControlsSpecVersion = VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION;

 //=== VK_NV_shader_subgroup_partitioned ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShaderSubgroupPartitionedExtensionName = VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShaderSubgroupPartitionedSpecVersion = VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION;

 //=== VK_KHR_depth_stencil_resolve ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_depth_stencil_resolve extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDepthStencilResolveExtensionName = VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_depth_stencil_resolve extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRDepthStencilResolveSpecVersion = VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRDepthStencilResolveSpecVersion = VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION;

 //=== VK_KHR_swapchain_mutable_format ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSwapchainMutableFormatExtensionName = VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
@@ -7757,10 +7685,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVMeshShaderSpecVersion = VK_NV_MESH_SHADER_SPEC_VERSION;

 //=== VK_NV_fragment_shader_barycentric ===
- VULKAN_HPP_DEPRECATED( "The VK_NV_fragment_shader_barycentric extension has been promoted to VK_KHR_fragment_shader_barycentric." )
 VULKAN_HPP_CONSTEXPR_INLINE auto NVFragmentShaderBarycentricExtensionName = VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_NV_fragment_shader_barycentric extension has been promoted to VK_KHR_fragment_shader_barycentric." )
- VULKAN_HPP_CONSTEXPR_INLINE auto NVFragmentShaderBarycentricSpecVersion = VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto NVFragmentShaderBarycentricSpecVersion = VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION;

 //=== VK_NV_shader_image_footprint ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShaderImageFootprintExtensionName = VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME;
@@ -7775,10 +7701,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVDeviceDiagnosticCheckpointsSpecVersion = VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION;

 //=== VK_KHR_timeline_semaphore ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_timeline_semaphore extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRTimelineSemaphoreExtensionName = VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_timeline_semaphore extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRTimelineSemaphoreSpecVersion = VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRTimelineSemaphoreSpecVersion = VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION;

 //=== VK_INTEL_shader_integer_functions2 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto INTELShaderIntegerFunctions2ExtensionName = VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME;
@@ -7789,10 +7713,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto INTELPerformanceQuerySpecVersion = VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION;

 //=== VK_KHR_vulkan_memory_model ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_vulkan_memory_model extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVulkanMemoryModelExtensionName = VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_vulkan_memory_model extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRVulkanMemoryModelSpecVersion = VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRVulkanMemoryModelSpecVersion = VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION;

 //=== VK_EXT_pci_bus_info ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPciBusInfoExtensionName = VK_EXT_PCI_BUS_INFO_EXTENSION_NAME;
@@ -7809,10 +7731,8 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_FUCHSIA*/

 //=== VK_KHR_shader_terminate_invocation ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_terminate_invocation extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderTerminateInvocationExtensionName = VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_terminate_invocation extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderTerminateInvocationSpecVersion = VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderTerminateInvocationSpecVersion = VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_METAL_EXT )
 //=== VK_EXT_metal_surface ===
@@ -7825,10 +7745,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTFragmentDensityMapSpecVersion = VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION;

 //=== VK_EXT_scalar_block_layout ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_scalar_block_layout extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTScalarBlockLayoutExtensionName = VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_scalar_block_layout extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTScalarBlockLayoutSpecVersion = VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTScalarBlockLayoutSpecVersion = VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION;

 //=== VK_GOOGLE_hlsl_functionality1 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto GOOGLEHlslFunctionality1ExtensionName = VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME;
@@ -7839,10 +7757,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto GOOGLEDecorateStringSpecVersion = VK_GOOGLE_DECORATE_STRING_SPEC_VERSION;

 //=== VK_EXT_subgroup_size_control ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_subgroup_size_control extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTSubgroupSizeControlExtensionName = VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_subgroup_size_control extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTSubgroupSizeControlSpecVersion = VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTSubgroupSizeControlSpecVersion = VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION;

 //=== VK_KHR_fragment_shading_rate ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRFragmentShadingRateExtensionName = VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME;
@@ -7869,10 +7785,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderQuadControlSpecVersion = VK_KHR_SHADER_QUAD_CONTROL_SPEC_VERSION;

 //=== VK_KHR_spirv_1_4 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_spirv_1_4 extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSpirv14ExtensionName = VK_KHR_SPIRV_1_4_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_spirv_1_4 extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRSpirv14SpecVersion = VK_KHR_SPIRV_1_4_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRSpirv14SpecVersion = VK_KHR_SPIRV_1_4_SPEC_VERSION;

 //=== VK_EXT_memory_budget ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTMemoryBudgetExtensionName = VK_EXT_MEMORY_BUDGET_EXTENSION_NAME;
@@ -7891,10 +7805,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVDedicatedAllocationImageAliasingSpecVersion = VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION;

 //=== VK_KHR_separate_depth_stencil_layouts ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_separate_depth_stencil_layouts extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSeparateDepthStencilLayoutsExtensionName = VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_separate_depth_stencil_layouts extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRSeparateDepthStencilLayoutsSpecVersion = VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRSeparateDepthStencilLayoutsSpecVersion = VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION;

 //=== VK_EXT_buffer_device_address ===
 VULKAN_HPP_DEPRECATED( "The VK_EXT_buffer_device_address extension has been deprecated by VK_KHR_buffer_device_address." )
@@ -7903,16 +7815,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTBufferDeviceAddressSpecVersion = VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION;

 //=== VK_EXT_tooling_info ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_tooling_info extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTToolingInfoExtensionName = VK_EXT_TOOLING_INFO_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_tooling_info extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTToolingInfoSpecVersion = VK_EXT_TOOLING_INFO_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTToolingInfoSpecVersion = VK_EXT_TOOLING_INFO_SPEC_VERSION;

 //=== VK_EXT_separate_stencil_usage ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_separate_stencil_usage extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTSeparateStencilUsageExtensionName = VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_separate_stencil_usage extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTSeparateStencilUsageSpecVersion = VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTSeparateStencilUsageSpecVersion = VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION;

 //=== VK_EXT_validation_features ===
 VULKAN_HPP_DEPRECATED( "The VK_EXT_validation_features extension has been deprecated by VK_EXT_layer_settings." )
@@ -7941,10 +7849,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTYcbcrImageArraysSpecVersion = VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION;

 //=== VK_KHR_uniform_buffer_standard_layout ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_uniform_buffer_standard_layout extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRUniformBufferStandardLayoutExtensionName = VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_uniform_buffer_standard_layout extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRUniformBufferStandardLayoutSpecVersion = VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRUniformBufferStandardLayoutSpecVersion = VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION;

 //=== VK_EXT_provoking_vertex ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTProvokingVertexExtensionName = VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME;
@@ -7961,38 +7867,28 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTHeadlessSurfaceSpecVersion = VK_EXT_HEADLESS_SURFACE_SPEC_VERSION;

 //=== VK_KHR_buffer_device_address ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_buffer_device_address extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRBufferDeviceAddressExtensionName = VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_buffer_device_address extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRBufferDeviceAddressSpecVersion = VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRBufferDeviceAddressSpecVersion = VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION;

 //=== VK_EXT_line_rasterization ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_line_rasterization extension has been promoted to VK_KHR_line_rasterization." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTLineRasterizationExtensionName = VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_line_rasterization extension has been promoted to VK_KHR_line_rasterization." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTLineRasterizationSpecVersion = VK_EXT_LINE_RASTERIZATION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTLineRasterizationSpecVersion = VK_EXT_LINE_RASTERIZATION_SPEC_VERSION;

 //=== VK_EXT_shader_atomic_float ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderAtomicFloatExtensionName = VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderAtomicFloatSpecVersion = VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION;

 //=== VK_EXT_host_query_reset ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_host_query_reset extension has been promoted to core in version 1.2." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTHostQueryResetExtensionName = VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_host_query_reset extension has been promoted to core in version 1.2." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTHostQueryResetSpecVersion = VK_EXT_HOST_QUERY_RESET_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTHostQueryResetSpecVersion = VK_EXT_HOST_QUERY_RESET_SPEC_VERSION;

 //=== VK_EXT_index_type_uint8 ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_index_type_uint8 extension has been promoted to VK_KHR_index_type_uint8." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTIndexTypeUint8ExtensionName = VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_index_type_uint8 extension has been promoted to VK_KHR_index_type_uint8." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTIndexTypeUint8SpecVersion = VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTIndexTypeUint8SpecVersion = VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION;

 //=== VK_EXT_extended_dynamic_state ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_extended_dynamic_state extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicStateExtensionName = VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_extended_dynamic_state extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicStateSpecVersion = VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicStateSpecVersion = VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION;

 //=== VK_KHR_deferred_host_operations ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRDeferredHostOperationsExtensionName = VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME;
@@ -8027,10 +7923,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTSwapchainMaintenance1SpecVersion = VK_EXT_SWAPCHAIN_MAINTENANCE_1_SPEC_VERSION;

 //=== VK_EXT_shader_demote_to_helper_invocation ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_shader_demote_to_helper_invocation extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderDemoteToHelperInvocationExtensionName = VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_shader_demote_to_helper_invocation extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderDemoteToHelperInvocationSpecVersion = VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderDemoteToHelperInvocationSpecVersion = VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION;

 //=== VK_NV_device_generated_commands ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVDeviceGeneratedCommandsExtensionName = VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME;
@@ -8041,16 +7935,12 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVInheritedViewportScissorSpecVersion = VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION;

 //=== VK_KHR_shader_integer_dot_product ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_integer_dot_product extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderIntegerDotProductExtensionName = VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_integer_dot_product extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderIntegerDotProductSpecVersion = VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderIntegerDotProductSpecVersion = VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION;

 //=== VK_EXT_texel_buffer_alignment ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_texel_buffer_alignment extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTTexelBufferAlignmentExtensionName = VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_texel_buffer_alignment extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTTexelBufferAlignmentSpecVersion = VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTTexelBufferAlignmentSpecVersion = VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION;

 //=== VK_QCOM_render_pass_transform ===
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMRenderPassTransformExtensionName = VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME;
@@ -8089,26 +7979,20 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVPresentBarrierSpecVersion = VK_NV_PRESENT_BARRIER_SPEC_VERSION;

 //=== VK_KHR_shader_non_semantic_info ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_non_semantic_info extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderNonSemanticInfoExtensionName = VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_shader_non_semantic_info extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderNonSemanticInfoSpecVersion = VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderNonSemanticInfoSpecVersion = VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION;

 //=== VK_KHR_present_id ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRPresentIdExtensionName = VK_KHR_PRESENT_ID_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRPresentIdSpecVersion = VK_KHR_PRESENT_ID_SPEC_VERSION;

 //=== VK_EXT_private_data ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_private_data extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPrivateDataExtensionName = VK_EXT_PRIVATE_DATA_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_private_data extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTPrivateDataSpecVersion = VK_EXT_PRIVATE_DATA_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTPrivateDataSpecVersion = VK_EXT_PRIVATE_DATA_SPEC_VERSION;

 //=== VK_EXT_pipeline_creation_cache_control ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_pipeline_creation_cache_control extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationCacheControlExtensionName = VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_pipeline_creation_cache_control extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationCacheControlSpecVersion = VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTPipelineCreationCacheControlSpecVersion = VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION;

 //=== VK_KHR_video_encode_queue ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVideoEncodeQueueExtensionName = VK_KHR_VIDEO_ENCODE_QUEUE_EXTENSION_NAME;
@@ -8139,10 +8023,8 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_METAL_EXT*/

 //=== VK_KHR_synchronization2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_synchronization2 extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRSynchronization2ExtensionName = VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_synchronization2 extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRSynchronization2SpecVersion = VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRSynchronization2SpecVersion = VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION;

 //=== VK_EXT_descriptor_buffer ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDescriptorBufferExtensionName = VK_EXT_DESCRIPTOR_BUFFER_EXTENSION_NAME;
@@ -8165,10 +8047,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderSubgroupUniformControlFlowSpecVersion = VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION;

 //=== VK_KHR_zero_initialize_workgroup_memory ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_zero_initialize_workgroup_memory extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRZeroInitializeWorkgroupMemoryExtensionName = VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_zero_initialize_workgroup_memory extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRZeroInitializeWorkgroupMemorySpecVersion = VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRZeroInitializeWorkgroupMemorySpecVersion = VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION;

 //=== VK_NV_fragment_shading_rate_enums ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVFragmentShadingRateEnumsExtensionName = VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME;
@@ -8183,10 +8063,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTMeshShaderSpecVersion = VK_EXT_MESH_SHADER_SPEC_VERSION;

 //=== VK_EXT_ycbcr_2plane_444_formats ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_ycbcr_2plane_444_formats extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTYcbcr2Plane444FormatsExtensionName = VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_ycbcr_2plane_444_formats extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTYcbcr2Plane444FormatsSpecVersion = VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTYcbcr2Plane444FormatsSpecVersion = VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION;

 //=== VK_EXT_fragment_density_map2 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTFragmentDensityMap2ExtensionName = VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME;
@@ -8197,20 +8075,16 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMRotatedCopyCommandsSpecVersion = VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION;

 //=== VK_EXT_image_robustness ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_image_robustness extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageRobustnessExtensionName = VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_image_robustness extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageRobustnessSpecVersion = VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageRobustnessSpecVersion = VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION;

 //=== VK_KHR_workgroup_memory_explicit_layout ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRWorkgroupMemoryExplicitLayoutExtensionName = VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRWorkgroupMemoryExplicitLayoutSpecVersion = VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION;

 //=== VK_KHR_copy_commands2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_copy_commands2 extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRCopyCommands2ExtensionName = VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_copy_commands2 extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRCopyCommands2SpecVersion = VK_KHR_COPY_COMMANDS_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRCopyCommands2SpecVersion = VK_KHR_COPY_COMMANDS_2_SPEC_VERSION;

 //=== VK_EXT_image_compression_control ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageCompressionControlExtensionName = VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME;
@@ -8221,20 +8095,16 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTAttachmentFeedbackLoopLayoutSpecVersion = VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION;

 //=== VK_EXT_4444_formats ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_4444_formats extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXT4444FormatsExtensionName = VK_EXT_4444_FORMATS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_4444_formats extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXT4444FormatsSpecVersion = VK_EXT_4444_FORMATS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXT4444FormatsSpecVersion = VK_EXT_4444_FORMATS_SPEC_VERSION;

 //=== VK_EXT_device_fault ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDeviceFaultExtensionName = VK_EXT_DEVICE_FAULT_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTDeviceFaultSpecVersion = VK_EXT_DEVICE_FAULT_SPEC_VERSION;

 //=== VK_ARM_rasterization_order_attachment_access ===
- VULKAN_HPP_DEPRECATED( "The VK_ARM_rasterization_order_attachment_access extension has been promoted to VK_EXT_rasterization_order_attachment_access." )
 VULKAN_HPP_CONSTEXPR_INLINE auto ARMRasterizationOrderAttachmentAccessExtensionName = VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_ARM_rasterization_order_attachment_access extension has been promoted to VK_EXT_rasterization_order_attachment_access." )
- VULKAN_HPP_CONSTEXPR_INLINE auto ARMRasterizationOrderAttachmentAccessSpecVersion = VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto ARMRasterizationOrderAttachmentAccessSpecVersion = VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION;

 //=== VK_EXT_rgba10x6_formats ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTRgba10X6FormatsExtensionName = VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME;
@@ -8253,10 +8123,8 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/

 //=== VK_VALVE_mutable_descriptor_type ===
- VULKAN_HPP_DEPRECATED( "The VK_VALVE_mutable_descriptor_type extension has been promoted to VK_EXT_mutable_descriptor_type." )
 VULKAN_HPP_CONSTEXPR_INLINE auto VALVEMutableDescriptorTypeExtensionName = VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_VALVE_mutable_descriptor_type extension has been promoted to VK_EXT_mutable_descriptor_type." )
- VULKAN_HPP_CONSTEXPR_INLINE auto VALVEMutableDescriptorTypeSpecVersion = VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto VALVEMutableDescriptorTypeSpecVersion = VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION;

 //=== VK_EXT_vertex_input_dynamic_state ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTVertexInputDynamicStateExtensionName = VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME;
@@ -8279,10 +8147,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPrimitiveTopologyListRestartSpecVersion = VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION;

 //=== VK_KHR_format_feature_flags2 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_format_feature_flags2 extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRFormatFeatureFlags2ExtensionName = VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_format_feature_flags2 extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRFormatFeatureFlags2SpecVersion = VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRFormatFeatureFlags2SpecVersion = VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_FUCHSIA )
 //=== VK_FUCHSIA_external_memory ===
@@ -8327,10 +8193,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTMultisampledRenderToSingleSampledSpecVersion = VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION;

 //=== VK_EXT_extended_dynamic_state2 ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_extended_dynamic_state2 extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicState2ExtensionName = VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_extended_dynamic_state2 extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicState2SpecVersion = VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTExtendedDynamicState2SpecVersion = VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION;

 #if defined( VK_USE_PLATFORM_SCREEN_QNX )
 //=== VK_QNX_screen_surface ===
@@ -8351,10 +8215,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRRayTracingMaintenance1SpecVersion = VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION;

 //=== VK_EXT_global_priority_query ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_global_priority_query extension has been promoted to VK_KHR_global_priority." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPriorityQueryExtensionName = VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_global_priority_query extension has been promoted to VK_KHR_global_priority." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPriorityQuerySpecVersion = VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTGlobalPriorityQuerySpecVersion = VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION;

 //=== VK_EXT_image_view_min_lod ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTImageViewMinLodExtensionName = VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME;
@@ -8387,10 +8249,8 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /*VK_ENABLE_BETA_EXTENSIONS*/

 //=== VK_EXT_load_store_op_none ===
- VULKAN_HPP_DEPRECATED( "The VK_EXT_load_store_op_none extension has been promoted to VK_KHR_load_store_op_none." )
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTLoadStoreOpNoneExtensionName = VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_EXT_load_store_op_none extension has been promoted to VK_KHR_load_store_op_none." )
- VULKAN_HPP_CONSTEXPR_INLINE auto EXTLoadStoreOpNoneSpecVersion = VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTLoadStoreOpNoneSpecVersion = VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION;

 //=== VK_HUAWEI_cluster_culling_shader ===
 VULKAN_HPP_CONSTEXPR_INLINE auto HUAWEIClusterCullingShaderExtensionName = VK_HUAWEI_CLUSTER_CULLING_SHADER_EXTENSION_NAME;
@@ -8405,10 +8265,8 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTPageableDeviceLocalMemorySpecVersion = VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION;

 //=== VK_KHR_maintenance4 ===
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance4 extension has been promoted to core in version 1.3." )
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance4ExtensionName = VK_KHR_MAINTENANCE_4_EXTENSION_NAME;
- VULKAN_HPP_DEPRECATED( "The VK_KHR_maintenance4 extension has been promoted to core in version 1.3." )
- VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance4SpecVersion = VK_KHR_MAINTENANCE_4_SPEC_VERSION;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance4SpecVersion = VK_KHR_MAINTENANCE_4_SPEC_VERSION;

 //=== VK_ARM_shader_core_properties ===
 VULKAN_HPP_CONSTEXPR_INLINE auto ARMShaderCorePropertiesExtensionName = VK_ARM_SHADER_CORE_PROPERTIES_EXTENSION_NAME;
@@ -8528,6 +8386,10 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance5ExtensionName = VK_KHR_MAINTENANCE_5_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance5SpecVersion = VK_KHR_MAINTENANCE_5_SPEC_VERSION;

+ //=== VK_AMD_anti_lag ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto AMDAntiLagExtensionName = VK_AMD_ANTI_LAG_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto AMDAntiLagSpecVersion = VK_AMD_ANTI_LAG_SPEC_VERSION;
+
 //=== VK_KHR_ray_tracing_position_fetch ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRRayTracingPositionFetchExtensionName = VK_KHR_RAY_TRACING_POSITION_FETCH_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRRayTracingPositionFetchSpecVersion = VK_KHR_RAY_TRACING_POSITION_FETCH_SPEC_VERSION;
@@ -8536,6 +8398,10 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderObjectExtensionName = VK_EXT_SHADER_OBJECT_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderObjectSpecVersion = VK_EXT_SHADER_OBJECT_SPEC_VERSION;

+ //=== VK_KHR_pipeline_binary ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRPipelineBinaryExtensionName = VK_KHR_PIPELINE_BINARY_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRPipelineBinarySpecVersion = VK_KHR_PIPELINE_BINARY_SPEC_VERSION;
+
 //=== VK_QCOM_tile_properties ===
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMTilePropertiesExtensionName = VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMTilePropertiesSpecVersion = VK_QCOM_TILE_PROPERTIES_SPEC_VERSION;
@@ -8560,6 +8426,10 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTMutableDescriptorTypeExtensionName = VK_EXT_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTMutableDescriptorTypeSpecVersion = VK_EXT_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION;

+ //=== VK_EXT_legacy_vertex_attributes ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTLegacyVertexAttributesExtensionName = VK_EXT_LEGACY_VERTEX_ATTRIBUTES_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTLegacyVertexAttributesSpecVersion = VK_EXT_LEGACY_VERTEX_ATTRIBUTES_SPEC_VERSION;
+
 //=== VK_EXT_layer_settings ===
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTLayerSettingsExtensionName = VK_EXT_LAYER_SETTINGS_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto EXTLayerSettingsSpecVersion = VK_EXT_LAYER_SETTINGS_SPEC_VERSION;
@@ -8588,6 +8458,10 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMMultiviewPerViewRenderAreasExtensionName = VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto QCOMMultiviewPerViewRenderAreasSpecVersion = VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_SPEC_VERSION;

+ //=== VK_KHR_compute_shader_derivatives ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRComputeShaderDerivativesExtensionName = VK_KHR_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRComputeShaderDerivativesSpecVersion = VK_KHR_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION;
+
 //=== VK_KHR_video_decode_av1 ===
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVideoDecodeAv1ExtensionName = VK_KHR_VIDEO_DECODE_AV1_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto KHRVideoDecodeAv1SpecVersion = VK_KHR_VIDEO_DECODE_AV1_SPEC_VERSION;
@@ -8670,14 +8544,34 @@ namespace VULKAN_HPP_NAMESPACE
 VULKAN_HPP_CONSTEXPR_INLINE auto NVRawAccessChainsExtensionName = VK_NV_RAW_ACCESS_CHAINS_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto NVRawAccessChainsSpecVersion = VK_NV_RAW_ACCESS_CHAINS_SPEC_VERSION;

+ //=== VK_KHR_shader_relaxed_extended_instruction ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderRelaxedExtendedInstructionExtensionName = VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRShaderRelaxedExtendedInstructionSpecVersion = VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_SPEC_VERSION;
+
+ //=== VK_NV_command_buffer_inheritance ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto NVCommandBufferInheritanceExtensionName = VK_NV_COMMAND_BUFFER_INHERITANCE_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto NVCommandBufferInheritanceSpecVersion = VK_NV_COMMAND_BUFFER_INHERITANCE_SPEC_VERSION;
+
+ //=== VK_KHR_maintenance7 ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance7ExtensionName = VK_KHR_MAINTENANCE_7_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto KHRMaintenance7SpecVersion = VK_KHR_MAINTENANCE_7_SPEC_VERSION;
+
 //=== VK_NV_shader_atomic_float16_vector ===
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShaderAtomicFloat16VectorExtensionName = VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_EXTENSION_NAME;
 VULKAN_HPP_CONSTEXPR_INLINE auto NVShaderAtomicFloat16VectorSpecVersion = VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_SPEC_VERSION;

+ //=== VK_EXT_shader_replicated_composites ===
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderReplicatedCompositesExtensionName = VK_EXT_SHADER_REPLICATED_COMPOSITES_EXTENSION_NAME;
+ VULKAN_HPP_CONSTEXPR_INLINE auto EXTShaderReplicatedCompositesSpecVersion = VK_EXT_SHADER_REPLICATED_COMPOSITES_SPEC_VERSION;
+
 //=== VK_NV_ray_tracing_validation ===
   VULKAN_HPP_CONSTEXPR_INLINE auto NVRayTracingValidationExtensionName = VK_NV_RAY_TRACING_VALIDATION_EXTENSION_NAME;
   VULKAN_HPP_CONSTEXPR_INLINE auto NVRayTracingValidationSpecVersion   = VK_NV_RAY_TRACING_VALIDATION_SPEC_VERSION;

+  //=== VK_MESA_image_alignment_control ===
+  VULKAN_HPP_CONSTEXPR_INLINE auto MESAImageAlignmentControlExtensionName = VK_MESA_IMAGE_ALIGNMENT_CONTROL_EXTENSION_NAME;
+  VULKAN_HPP_CONSTEXPR_INLINE auto MESAImageAlignmentControlSpecVersion   = VK_MESA_IMAGE_ALIGNMENT_CONTROL_SPEC_VERSION;
+
 }  // namespace VULKAN_HPP_NAMESPACE

 // clang-format off
@@ -12062,25 +11956,6 @@ namespace VULKAN_HPP_NAMESPACE
   };
 # endif /*VK_USE_PLATFORM_GGP*/

-  //=== VK_NV_compute_shader_derivatives ===
-  template <>
-  struct StructExtends<PhysicalDeviceComputeShaderDerivativesFeaturesNV, PhysicalDeviceFeatures2>
-  {
-    enum
-    {
-      value = true
-    };
-  };
-
-  template <>
-  struct StructExtends<PhysicalDeviceComputeShaderDerivativesFeaturesNV, DeviceCreateInfo>
-  {
-    enum
-    {
-      value = true
-    };
-  };
-
   //=== VK_NV_mesh_shader ===
   template <>
   struct StructExtends<PhysicalDeviceMeshShaderFeaturesNV, PhysicalDeviceFeatures2>
@@ -12543,6 +12418,24 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  template <>
+  struct StructExtends
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_KHR_present_wait ===
   template <>
   struct StructExtends<PhysicalDevicePresentWaitFeaturesKHR, PhysicalDeviceFeatures2>
@@ -15711,6 +15604,25 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_AMD_anti_lag ===
+  template <>
+  struct StructExtends<PhysicalDeviceAntiLagFeaturesAMD, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceAntiLagFeaturesAMD, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_KHR_ray_tracing_position_fetch ===
   template <>
   struct StructExtends<PhysicalDeviceRayTracingPositionFetchFeaturesKHR, PhysicalDeviceFeatures2>
@@ -15758,6 +15670,70 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_KHR_pipeline_binary ===
+  template <>
+  struct StructExtends<PhysicalDevicePipelineBinaryFeaturesKHR, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDevicePipelineBinaryFeaturesKHR, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDevicePipelineBinaryPropertiesKHR, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<DevicePipelineBinaryInternalCacheControlKHR, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PipelineBinaryInfoKHR, GraphicsPipelineCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PipelineBinaryInfoKHR, ComputePipelineCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PipelineBinaryInfoKHR, RayTracingPipelineCreateInfoKHR>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_QCOM_tile_properties ===
   template <>
   struct StructExtends<PhysicalDeviceTilePropertiesFeaturesQCOM, PhysicalDeviceFeatures2>
@@ -15917,6 +15893,34 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_EXT_legacy_vertex_attributes ===
+  template <>
+  struct StructExtends<PhysicalDeviceLegacyVertexAttributesFeaturesEXT, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceLegacyVertexAttributesFeaturesEXT, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceLegacyVertexAttributesPropertiesEXT, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_EXT_layer_settings ===
   template <>
   struct StructExtends<LayerSettingsCreateInfoEXT, InstanceCreateInfo>
@@ -16095,6 +16099,34 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_KHR_compute_shader_derivatives ===
+  template <>
+  struct StructExtends<PhysicalDeviceComputeShaderDerivativesFeaturesKHR, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceComputeShaderDerivativesFeaturesKHR, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceComputeShaderDerivativesPropertiesKHR, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_KHR_video_decode_av1 ===
   template <>
   struct StructExtends<VideoDecodeAV1ProfileInfoKHR, VideoProfileInfoKHR>
@@ -16628,6 +16660,90 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_KHR_shader_relaxed_extended_instruction ===
+  template <>
+  struct StructExtends<PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  //=== VK_NV_command_buffer_inheritance ===
+  template <>
+  struct StructExtends<PhysicalDeviceCommandBufferInheritanceFeaturesNV, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceCommandBufferInheritanceFeaturesNV, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
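// Illustrative sketch (not part of the upstream header): the StructExtends specializations
// above and below are what let vulkan.hpp validate pNext chains at compile time, since
// vk::StructureChain only accepts one struct behind another when
// StructExtends<Extending, Extended>::value is true. A minimal feature query, assuming a
// valid vk::PhysicalDevice whose driver knows the new VK_AMD_anti_lag extension:

#include <vulkan/vulkan.hpp>

bool SupportsAntiLag( vk::PhysicalDevice physicalDevice )
{
  // Chaining PhysicalDeviceAntiLagFeaturesAMD under PhysicalDeviceFeatures2 compiles only
  // because StructExtends<PhysicalDeviceAntiLagFeaturesAMD, PhysicalDeviceFeatures2> is
  // specialized with value = true.
  auto chain = physicalDevice.getFeatures2<vk::PhysicalDeviceFeatures2, vk::PhysicalDeviceAntiLagFeaturesAMD>();
  return chain.get<vk::PhysicalDeviceAntiLagFeaturesAMD>().antiLag == VK_TRUE;
}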
+  //=== VK_KHR_maintenance7 ===
+  template <>
+  struct StructExtends<PhysicalDeviceMaintenance7FeaturesKHR, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceMaintenance7FeaturesKHR, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceMaintenance7PropertiesKHR, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceLayeredApiPropertiesListKHR, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceLayeredApiVulkanPropertiesKHR, PhysicalDeviceLayeredApiPropertiesKHR>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_NV_shader_atomic_float16_vector ===
   template <>
   struct StructExtends<PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV, PhysicalDeviceFeatures2>
@@ -16647,6 +16763,25 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_EXT_shader_replicated_composites ===
+  template <>
+  struct StructExtends<PhysicalDeviceShaderReplicatedCompositesFeaturesEXT, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceShaderReplicatedCompositesFeaturesEXT, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
   //=== VK_NV_ray_tracing_validation ===
   template <>
   struct StructExtends<PhysicalDeviceRayTracingValidationFeaturesNV, PhysicalDeviceFeatures2>
@@ -16666,6 +16801,43 @@ namespace VULKAN_HPP_NAMESPACE
     };
   };

+  //=== VK_MESA_image_alignment_control ===
+  template <>
+  struct StructExtends<PhysicalDeviceImageAlignmentControlFeaturesMESA, PhysicalDeviceFeatures2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceImageAlignmentControlFeaturesMESA, DeviceCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<PhysicalDeviceImageAlignmentControlPropertiesMESA, PhysicalDeviceProperties2>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
+  template <>
+  struct StructExtends<ImageAlignmentControlCreateInfoMESA, ImageCreateInfo>
+  {
+    enum
+    {
+      value = true
+    };
+  };
+
 #endif  // VULKAN_HPP_DISABLE_ENHANCED_MODE

 #if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL
@@ -17861,12 +18033,22 @@ namespace VULKAN_HPP_NAMESPACE
     PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR = 0;
     PFN_vkGetImageSubresourceLayout2KHR      vkGetImageSubresourceLayout2KHR      = 0;

+    //=== VK_AMD_anti_lag ===
+    PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD = 0;
+
     //=== VK_EXT_shader_object ===
     PFN_vkCreateShadersEXT       vkCreateShadersEXT       = 0;
     PFN_vkDestroyShaderEXT       vkDestroyShaderEXT       = 0;
     PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT = 0;
     PFN_vkCmdBindShadersEXT      vkCmdBindShadersEXT      = 0;

+    //=== VK_KHR_pipeline_binary ===
+    PFN_vkCreatePipelineBinariesKHR      vkCreatePipelineBinariesKHR      = 0;
+    PFN_vkDestroyPipelineBinaryKHR       vkDestroyPipelineBinaryKHR       = 0;
+    PFN_vkGetPipelineKeyKHR              vkGetPipelineKeyKHR              = 0;
+    PFN_vkGetPipelineBinaryDataKHR       vkGetPipelineBinaryDataKHR       = 0;
+    PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR = 0;
+
     //=== VK_QCOM_tile_properties ===
     PFN_vkGetFramebufferTilePropertiesQCOM      vkGetFramebufferTilePropertiesQCOM      = 0;
     PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM = 0;
@@ -19263,12 +19445,22 @@ namespace VULKAN_HPP_NAMESPACE
         PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceImageSubresourceLayoutKHR" ) );
       vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetInstanceProcAddr( instance, "vkGetImageSubresourceLayout2KHR" ) );

+      //=== VK_AMD_anti_lag ===
+      vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetInstanceProcAddr( instance, "vkAntiLagUpdateAMD" ) );
+
       //=== VK_EXT_shader_object ===
       vkCreateShadersEXT       = PFN_vkCreateShadersEXT( vkGetInstanceProcAddr( instance, "vkCreateShadersEXT" ) );
       vkDestroyShaderEXT       = PFN_vkDestroyShaderEXT( vkGetInstanceProcAddr( instance, "vkDestroyShaderEXT" ) );
       vkGetShaderBinaryDataEXT = PFN_vkGetShaderBinaryDataEXT( vkGetInstanceProcAddr( instance, "vkGetShaderBinaryDataEXT" ) );
       vkCmdBindShadersEXT      = PFN_vkCmdBindShadersEXT( vkGetInstanceProcAddr( instance, "vkCmdBindShadersEXT" ) );

+      //=== VK_KHR_pipeline_binary ===
+      vkCreatePipelineBinariesKHR = PFN_vkCreatePipelineBinariesKHR( vkGetInstanceProcAddr( instance, "vkCreatePipelineBinariesKHR" ) );
+
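// Illustrative sketch (not part of the loader table): once vkAntiLagUpdateAMD has been
// fetched as above, or the prototype is linked directly, a frame loop might drive
// VK_AMD_anti_lag as below. The `device`, `frameIndex`, and `stage` arguments are assumed,
// and the extension must have been enabled at device creation.

void UpdateAntiLag( VkDevice device, uint64_t frameIndex, VkAntiLagStageAMD stage )
{
  // Call once with VK_ANTI_LAG_STAGE_INPUT_AMD when input is sampled, and once with
  // VK_ANTI_LAG_STAGE_PRESENT_AMD just before vkQueuePresentKHR, using the same frameIndex.
  VkAntiLagPresentationInfoAMD presentationInfo = {};
  presentationInfo.sType      = VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD;
  presentationInfo.stage      = stage;
  presentationInfo.frameIndex = frameIndex;

  VkAntiLagDataAMD data = {};
  data.sType             = VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD;
  data.mode              = VK_ANTI_LAG_MODE_ON_AMD;
  data.maxFPS            = 0;  // 0 leaves the frame rate uncapped
  data.pPresentationInfo = &presentationInfo;

  vkAntiLagUpdateAMD( device, &data );
}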
vkDestroyPipelineBinaryKHR = PFN_vkDestroyPipelineBinaryKHR( vkGetInstanceProcAddr( instance, "vkDestroyPipelineBinaryKHR" ) ); + vkGetPipelineKeyKHR = PFN_vkGetPipelineKeyKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineKeyKHR" ) ); + vkGetPipelineBinaryDataKHR = PFN_vkGetPipelineBinaryDataKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineBinaryDataKHR" ) ); + vkReleaseCapturedPipelineDataKHR = PFN_vkReleaseCapturedPipelineDataKHR( vkGetInstanceProcAddr( instance, "vkReleaseCapturedPipelineDataKHR" ) ); + //=== VK_QCOM_tile_properties === vkGetFramebufferTilePropertiesQCOM = PFN_vkGetFramebufferTilePropertiesQCOM( vkGetInstanceProcAddr( instance, "vkGetFramebufferTilePropertiesQCOM" ) ); vkGetDynamicRenderingTilePropertiesQCOM = @@ -20313,12 +20505,22 @@ namespace VULKAN_HPP_NAMESPACE vkGetDeviceImageSubresourceLayoutKHR = PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayoutKHR" ) ); vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2KHR" ) ); + //=== VK_AMD_anti_lag === + vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetDeviceProcAddr( device, "vkAntiLagUpdateAMD" ) ); + //=== VK_EXT_shader_object === vkCreateShadersEXT = PFN_vkCreateShadersEXT( vkGetDeviceProcAddr( device, "vkCreateShadersEXT" ) ); vkDestroyShaderEXT = PFN_vkDestroyShaderEXT( vkGetDeviceProcAddr( device, "vkDestroyShaderEXT" ) ); vkGetShaderBinaryDataEXT = PFN_vkGetShaderBinaryDataEXT( vkGetDeviceProcAddr( device, "vkGetShaderBinaryDataEXT" ) ); vkCmdBindShadersEXT = PFN_vkCmdBindShadersEXT( vkGetDeviceProcAddr( device, "vkCmdBindShadersEXT" ) ); + //=== VK_KHR_pipeline_binary === + vkCreatePipelineBinariesKHR = PFN_vkCreatePipelineBinariesKHR( vkGetDeviceProcAddr( device, "vkCreatePipelineBinariesKHR" ) ); + vkDestroyPipelineBinaryKHR = PFN_vkDestroyPipelineBinaryKHR( vkGetDeviceProcAddr( device, "vkDestroyPipelineBinaryKHR" ) ); + vkGetPipelineKeyKHR = PFN_vkGetPipelineKeyKHR( vkGetDeviceProcAddr( device, "vkGetPipelineKeyKHR" ) ); + vkGetPipelineBinaryDataKHR = PFN_vkGetPipelineBinaryDataKHR( vkGetDeviceProcAddr( device, "vkGetPipelineBinaryDataKHR" ) ); + vkReleaseCapturedPipelineDataKHR = PFN_vkReleaseCapturedPipelineDataKHR( vkGetDeviceProcAddr( device, "vkReleaseCapturedPipelineDataKHR" ) ); + //=== VK_QCOM_tile_properties === vkGetFramebufferTilePropertiesQCOM = PFN_vkGetFramebufferTilePropertiesQCOM( vkGetDeviceProcAddr( device, "vkGetFramebufferTilePropertiesQCOM" ) ); vkGetDynamicRenderingTilePropertiesQCOM = diff --git a/third_party/vulkan/vulkan_core.h b/third_party/vulkan/vulkan_core.h index 1f30669..6f1c17f 100644 --- a/third_party/vulkan/vulkan_core.h +++ b/third_party/vulkan/vulkan_core.h @@ -69,21 +69,25 @@ extern "C" { #define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0 // Version of this file -#define VK_HEADER_VERSION 281 +#define VK_HEADER_VERSION 295 // Complete version of this file #define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 3, VK_HEADER_VERSION) +// VK_MAKE_VERSION is deprecated, but no reason was given in the API XML // DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead. #define VK_MAKE_VERSION(major, minor, patch) \ ((((uint32_t)(major)) << 22U) | (((uint32_t)(minor)) << 12U) | ((uint32_t)(patch))) +// VK_VERSION_MAJOR is deprecated, but no reason was given in the API XML // DEPRECATED: This define is deprecated. VK_API_VERSION_MAJOR should be used instead. 
#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22U) +// VK_VERSION_MINOR is deprecated, but no reason was given in the API XML // DEPRECATED: This define is deprecated. VK_API_VERSION_MINOR should be used instead. #define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12U) & 0x3FFU) +// VK_VERSION_PATCH is deprecated, but no reason was given in the API XML // DEPRECATED: This define is deprecated. VK_API_VERSION_PATCH should be used instead. #define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) @@ -185,6 +189,8 @@ typedef enum VkResult { VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR = -1000299000, VK_ERROR_COMPRESSION_EXHAUSTED_EXT = -1000338000, VK_INCOMPATIBLE_SHADER_BINARY_EXT = 1000482000, + VK_PIPELINE_BINARY_MISSING_KHR = 1000483000, + VK_ERROR_NOT_ENOUGH_SPACE_KHR = -1000483000, VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, @@ -193,6 +199,7 @@ typedef enum VkResult { VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, + // VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT is a deprecated alias VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT = VK_INCOMPATIBLE_SHADER_BINARY_EXT, VK_RESULT_MAX_ENUM = 0x7FFFFFFF } VkResult; @@ -689,7 +696,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, @@ -1031,10 +1037,23 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR = 1000338003, VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR = 1000470005, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR = 1000470006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD = 1000476000, + VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD = 1000476001, + VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD = 1000476002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR = 1000481000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT = 1000482000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT = 1000482001, VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT = 1000482002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_FEATURES_KHR = 1000483000, + VK_STRUCTURE_TYPE_PIPELINE_BINARY_CREATE_INFO_KHR = 1000483001, + VK_STRUCTURE_TYPE_PIPELINE_BINARY_INFO_KHR = 1000483002, + VK_STRUCTURE_TYPE_PIPELINE_BINARY_KEY_KHR = 1000483003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_PROPERTIES_KHR = 1000483004, + VK_STRUCTURE_TYPE_RELEASE_CAPTURED_PIPELINE_DATA_INFO_KHR = 1000483005, + VK_STRUCTURE_TYPE_PIPELINE_BINARY_DATA_INFO_KHR = 1000483006, + VK_STRUCTURE_TYPE_PIPELINE_CREATE_INFO_KHR = 1000483007, + VK_STRUCTURE_TYPE_DEVICE_PIPELINE_BINARY_INTERNAL_CACHE_CONTROL_KHR = 1000483008, + VK_STRUCTURE_TYPE_PIPELINE_BINARY_HANDLES_INFO_KHR = 1000483009, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM = 
1000484000, VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM = 1000484001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC = 1000485000, @@ -1046,6 +1065,8 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_PROPERTIES_NV = 1000492001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT = 1000351000, VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT = 1000351002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_FEATURES_EXT = 1000495000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_PROPERTIES_EXT = 1000495001, VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT = 1000496000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM = 1000497000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM = 1000497001, @@ -1065,6 +1086,8 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_KHR = 1000506002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM = 1000510000, VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM = 1000510001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR = 1000201000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_PROPERTIES_KHR = 1000511000, VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_CAPABILITIES_KHR = 1000512000, VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PICTURE_INFO_KHR = 1000512001, VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PROFILE_INFO_KHR = 1000512003, @@ -1110,10 +1133,22 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT = 1000545008, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV = 1000546000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV = 1000555000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_RELAXED_EXTENDED_INSTRUCTION_FEATURES_KHR = 1000558000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMMAND_BUFFER_INHERITANCE_FEATURES_NV = 1000559000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_FEATURES_KHR = 1000562000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_PROPERTIES_KHR = 1000562001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_LIST_KHR = 1000562002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_KHR = 1000562003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_VULKAN_PROPERTIES_KHR = 1000562004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV = 1000563000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT = 1000564000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV = 1000568000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA = 1000575000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_PROPERTIES_MESA = 1000575001, + VK_STRUCTURE_TYPE_IMAGE_ALIGNMENT_CONTROL_CREATE_INFO_MESA = 1000575002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + // VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT is a deprecated alias VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, VK_STRUCTURE_TYPE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INFO, VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR = 
VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO, @@ -1158,6 +1193,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + // VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT is a deprecated alias VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, @@ -1220,6 +1256,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, @@ -1227,6 +1264,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, + // VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL is a deprecated alias VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES, @@ -1397,6 +1435,7 @@ typedef enum VkObjectType { VK_OBJECT_TYPE_MICROMAP_EXT = 1000396000, VK_OBJECT_TYPE_OPTICAL_FLOW_SESSION_NV = 1000464000, VK_OBJECT_TYPE_SHADER_EXT = 1000482000, + VK_OBJECT_TYPE_PIPELINE_BINARY_KHR = 1000483000, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT, @@ -1404,6 +1443,7 @@ typedef enum VkObjectType { } VkObjectType; typedef enum VkVendorId { + VK_VENDOR_ID_KHRONOS = 0x10000, VK_VENDOR_ID_VIV = 0x10001, VK_VENDOR_ID_VSI = 0x10002, VK_VENDOR_ID_KAZAN = 0x10003, @@ -1676,7 +1716,7 @@ typedef enum VkFormat { VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, 
VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, - VK_FORMAT_R16G16_S10_5_NV = 1000464000, + VK_FORMAT_R16G16_SFIXED5_NV = 1000464000, VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR = 1000470000, VK_FORMAT_A8_UNORM_KHR = 1000470001, VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK, @@ -1733,6 +1773,8 @@ typedef enum VkFormat { VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM, VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16, VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16, + // VK_FORMAT_R16G16_S10_5_NV is a deprecated alias + VK_FORMAT_R16G16_S10_5_NV = VK_FORMAT_R16G16_SFIXED5_NV, VK_FORMAT_MAX_ENUM = 0x7FFFFFFF } VkFormat; @@ -2083,6 +2125,7 @@ typedef enum VkSamplerAddressMode { VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + // VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR is a deprecated alias VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF } VkSamplerAddressMode; @@ -2166,7 +2209,8 @@ typedef enum VkIndexType { typedef enum VkSubpassContents { VK_SUBPASS_CONTENTS_INLINE = 0, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, - VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_EXT = 1000451000, + VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_KHR = 1000451000, + VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_EXT = VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_KHR, VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF } VkSubpassContents; @@ -2607,7 +2651,9 @@ typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT = 0x08000000, VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000, VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + // VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR is a deprecated alias VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + // VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT is a deprecated alias VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, @@ -2816,6 +2862,7 @@ typedef enum VkStencilFaceFlagBits { VK_STENCIL_FACE_FRONT_BIT = 0x00000001, VK_STENCIL_FACE_BACK_BIT = 0x00000002, VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, + // VK_STENCIL_FRONT_AND_BACK is a deprecated alias VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkStencilFaceFlagBits; @@ -3226,7 +3273,9 @@ typedef struct VkDeviceCreateInfo { VkDeviceCreateFlags flags; uint32_t queueCreateInfoCount; const VkDeviceQueueCreateInfo* pQueueCreateInfos; + // enabledLayerCount is deprecated and should not be used uint32_t enabledLayerCount; + // ppEnabledLayerNames is deprecated and should not be used const char* const* ppEnabledLayerNames; uint32_t enabledExtensionCount; const char* const* ppEnabledExtensionNames; @@ -5784,7 +5833,8 @@ typedef enum VkDriverId { VK_DRIVER_ID_MESA_DOZEN = 23, VK_DRIVER_ID_MESA_NVK = 24, 
VK_DRIVER_ID_IMAGINATION_OPEN_SOURCE_MESA = 25, - VK_DRIVER_ID_MESA_AGXV = 26, + VK_DRIVER_ID_MESA_HONEYKRISP = 26, + VK_DRIVER_ID_RESERVED_27 = 27, VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY, VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE, VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV, @@ -6617,6 +6667,7 @@ static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0 static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT = 0x00080000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT = 0x00100000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI = 0x8000000000ULL; +// VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI is a deprecated alias static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI = 0x10000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR = 0x10000000ULL; @@ -6708,11 +6759,12 @@ typedef enum VkRenderingFlagBits { VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT = 0x00000001, VK_RENDERING_SUSPENDING_BIT = 0x00000002, VK_RENDERING_RESUMING_BIT = 0x00000004, - VK_RENDERING_CONTENTS_INLINE_BIT_EXT = 0x00000010, VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x00000008, + VK_RENDERING_CONTENTS_INLINE_BIT_KHR = 0x00000010, VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT, VK_RENDERING_SUSPENDING_BIT_KHR = VK_RENDERING_SUSPENDING_BIT, VK_RENDERING_RESUMING_BIT_KHR = VK_RENDERING_RESUMING_BIT, + VK_RENDERING_CONTENTS_INLINE_BIT_EXT = VK_RENDERING_CONTENTS_INLINE_BIT_KHR, VK_RENDERING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkRenderingFlagBits; typedef VkFlags VkRenderingFlags; @@ -7569,6 +7621,7 @@ typedef enum VkColorSpaceKHR { VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + // VK_COLOR_SPACE_DOLBYVISION_EXT is deprecated, but no reason was given in the API XML VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, @@ -7576,7 +7629,9 @@ typedef enum VkColorSpaceKHR { VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, + // VK_COLORSPACE_SRGB_NONLINEAR_KHR is a deprecated alias VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + // VK_COLOR_SPACE_DCI_P3_LINEAR_EXT is a deprecated alias VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF } VkColorSpaceKHR; @@ -9007,7 +9062,9 @@ VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( #define VK_KHR_maintenance1 1 #define VK_KHR_MAINTENANCE_1_SPEC_VERSION 2 #define VK_KHR_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_maintenance1" +// VK_KHR_MAINTENANCE1_SPEC_VERSION is a deprecated alias #define VK_KHR_MAINTENANCE1_SPEC_VERSION VK_KHR_MAINTENANCE_1_SPEC_VERSION +// VK_KHR_MAINTENANCE1_EXTENSION_NAME is a deprecated alias #define VK_KHR_MAINTENANCE1_EXTENSION_NAME VK_KHR_MAINTENANCE_1_EXTENSION_NAME typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; @@ -9489,8 +9546,11 @@ typedef enum VkPerformanceCounterScopeKHR { VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, 
VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + // VK_QUERY_SCOPE_COMMAND_BUFFER_KHR is a deprecated alias VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + // VK_QUERY_SCOPE_RENDER_PASS_KHR is a deprecated alias VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + // VK_QUERY_SCOPE_COMMAND_KHR is a deprecated alias VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterScopeKHR; @@ -9508,7 +9568,9 @@ typedef enum VkPerformanceCounterStorageKHR { typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, + // VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR is a deprecated alias VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + // VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR is a deprecated alias VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF } VkPerformanceCounterDescriptionFlagBitsKHR; @@ -9610,7 +9672,9 @@ VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( #define VK_KHR_maintenance2 1 #define VK_KHR_MAINTENANCE_2_SPEC_VERSION 1 #define VK_KHR_MAINTENANCE_2_EXTENSION_NAME "VK_KHR_maintenance2" +// VK_KHR_MAINTENANCE2_SPEC_VERSION is a deprecated alias #define VK_KHR_MAINTENANCE2_SPEC_VERSION VK_KHR_MAINTENANCE_2_SPEC_VERSION +// VK_KHR_MAINTENANCE2_EXTENSION_NAME is a deprecated alias #define VK_KHR_MAINTENANCE2_EXTENSION_NAME VK_KHR_MAINTENANCE_2_EXTENSION_NAME typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; @@ -9877,7 +9941,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( #define VK_KHR_maintenance3 1 #define VK_KHR_MAINTENANCE_3_SPEC_VERSION 1 #define VK_KHR_MAINTENANCE_3_EXTENSION_NAME "VK_KHR_maintenance3" +// VK_KHR_MAINTENANCE3_SPEC_VERSION is a deprecated alias #define VK_KHR_MAINTENANCE3_SPEC_VERSION VK_KHR_MAINTENANCE_3_SPEC_VERSION +// VK_KHR_MAINTENANCE3_EXTENSION_NAME is a deprecated alias #define VK_KHR_MAINTENANCE3_EXTENSION_NAME VK_KHR_MAINTENANCE_3_EXTENSION_NAME typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; @@ -10259,7 +10325,7 @@ typedef struct VkRenderingInputAttachmentIndexInfoKHR { } VkRenderingInputAttachmentIndexInfoKHR; typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingAttachmentLocationsKHR)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfoKHR* pLocationInfo); -typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfoKHR* pLocationInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfoKHR* pInputAttachmentIndexInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocationsKHR( @@ -10268,7 +10334,7 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocationsKHR( VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingInputAttachmentIndicesKHR( VkCommandBuffer commandBuffer, - const VkRenderingInputAttachmentIndexInfoKHR* pLocationInfo); + const 
VkRenderingInputAttachmentIndexInfoKHR* pInputAttachmentIndexInfo); #endif @@ -10605,10 +10671,6 @@ typedef enum VkVideoEncodeTuningModeKHR { VK_VIDEO_ENCODE_TUNING_MODE_LOSSLESS_KHR = 4, VK_VIDEO_ENCODE_TUNING_MODE_MAX_ENUM_KHR = 0x7FFFFFFF } VkVideoEncodeTuningModeKHR; - -typedef enum VkVideoEncodeFlagBitsKHR { - VK_VIDEO_ENCODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF -} VkVideoEncodeFlagBitsKHR; typedef VkFlags VkVideoEncodeFlagsKHR; typedef enum VkVideoEncodeCapabilityFlagBitsKHR { @@ -11109,6 +11171,7 @@ typedef VkFlags64 VkPipelineCreateFlagBits2KHR; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR = 0x00000001ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR = 0x00000002ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR = 0x00000004ULL; +static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x400000000ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR = 0x00000010ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV = 0x00000020ULL; @@ -11137,6 +11200,7 @@ static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCE static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000ULL; static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT = 0x20000000ULL; +static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR = 0x80000000ULL; typedef VkFlags64 VkBufferUsageFlags2KHR; @@ -11271,6 +11335,128 @@ typedef struct VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR { +// VK_KHR_pipeline_binary is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_pipeline_binary 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineBinaryKHR) +#define VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR 32U +#define VK_KHR_PIPELINE_BINARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_BINARY_EXTENSION_NAME "VK_KHR_pipeline_binary" +typedef struct VkPhysicalDevicePipelineBinaryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineBinaries; +} VkPhysicalDevicePipelineBinaryFeaturesKHR; + +typedef struct VkPhysicalDevicePipelineBinaryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineBinaryInternalCache; + VkBool32 pipelineBinaryInternalCacheControl; + VkBool32 pipelineBinaryPrefersInternalCache; + VkBool32 pipelineBinaryPrecompiledInternalCache; + VkBool32 pipelineBinaryCompressedData; +} VkPhysicalDevicePipelineBinaryPropertiesKHR; + +typedef struct VkDevicePipelineBinaryInternalCacheControlKHR { + VkStructureType sType; + const void* pNext; + VkBool32 disableInternalCache; +} VkDevicePipelineBinaryInternalCacheControlKHR; + +typedef struct VkPipelineBinaryKeyKHR { + VkStructureType sType; + void* pNext; + uint32_t keySize; + uint8_t key[VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR]; +} VkPipelineBinaryKeyKHR; + +typedef struct VkPipelineBinaryDataKHR { + size_t dataSize; + void* pData; +} VkPipelineBinaryDataKHR; + +typedef struct VkPipelineBinaryKeysAndDataKHR { + uint32_t binaryCount; + const VkPipelineBinaryKeyKHR* pPipelineBinaryKeys; + const VkPipelineBinaryDataKHR* pPipelineBinaryData; +} VkPipelineBinaryKeysAndDataKHR; + +typedef struct VkPipelineCreateInfoKHR { + VkStructureType sType; + void* pNext; +} VkPipelineCreateInfoKHR; + +typedef struct VkPipelineBinaryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + const VkPipelineBinaryKeysAndDataKHR* pKeysAndDataInfo; + VkPipeline pipeline; + const VkPipelineCreateInfoKHR* pPipelineCreateInfo; +} VkPipelineBinaryCreateInfoKHR; + +typedef struct VkPipelineBinaryInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t binaryCount; + const VkPipelineBinaryKHR* pPipelineBinaries; +} VkPipelineBinaryInfoKHR; + +typedef struct VkReleaseCapturedPipelineDataInfoKHR { + VkStructureType sType; + void* pNext; + VkPipeline pipeline; +} VkReleaseCapturedPipelineDataInfoKHR; + +typedef struct VkPipelineBinaryDataInfoKHR { + VkStructureType sType; + void* pNext; + VkPipelineBinaryKHR pipelineBinary; +} VkPipelineBinaryDataInfoKHR; + +typedef struct VkPipelineBinaryHandlesInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t pipelineBinaryCount; + VkPipelineBinaryKHR* pPipelineBinaries; +} VkPipelineBinaryHandlesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineBinariesKHR)(VkDevice device, const VkPipelineBinaryCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineBinaryHandlesInfoKHR* pBinaries); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineBinaryKHR)(VkDevice device, VkPipelineBinaryKHR pipelineBinary, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineKeyKHR)(VkDevice device, const VkPipelineCreateInfoKHR* pPipelineCreateInfo, VkPipelineBinaryKeyKHR* pPipelineKey); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineBinaryDataKHR)(VkDevice device, const VkPipelineBinaryDataInfoKHR* pInfo, VkPipelineBinaryKeyKHR* pPipelineBinaryKey, size_t* pPipelineBinaryDataSize, void* pPipelineBinaryData); +typedef VkResult (VKAPI_PTR *PFN_vkReleaseCapturedPipelineDataKHR)(VkDevice device, const VkReleaseCapturedPipelineDataInfoKHR* pInfo, const VkAllocationCallbacks* pAllocator); + +#ifndef 
VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineBinariesKHR( + VkDevice device, + const VkPipelineBinaryCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineBinaryHandlesInfoKHR* pBinaries); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineBinaryKHR( + VkDevice device, + VkPipelineBinaryKHR pipelineBinary, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineKeyKHR( + VkDevice device, + const VkPipelineCreateInfoKHR* pPipelineCreateInfo, + VkPipelineBinaryKeyKHR* pPipelineKey); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineBinaryDataKHR( + VkDevice device, + const VkPipelineBinaryDataInfoKHR* pInfo, + VkPipelineBinaryKeyKHR* pPipelineBinaryKey, + size_t* pPipelineBinaryDataSize, + void* pPipelineBinaryData); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseCapturedPipelineDataKHR( + VkDevice device, + const VkReleaseCapturedPipelineDataInfoKHR* pInfo, + const VkAllocationCallbacks* pAllocator); +#endif + + // VK_KHR_cooperative_matrix is a preprocessor guard. Do not pass it to API calls. #define VK_KHR_cooperative_matrix 1 #define VK_KHR_COOPERATIVE_MATRIX_SPEC_VERSION 2 @@ -11350,6 +11536,25 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR #endif +// VK_KHR_compute_shader_derivatives is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_compute_shader_derivatives 1 +#define VK_KHR_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_KHR_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_KHR_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR; + +typedef struct VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 meshAndTaskShaderDerivatives; +} VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR; + + + // VK_KHR_video_decode_av1 is a preprocessor guard. Do not pass it to API calls. #define VK_KHR_video_decode_av1 1 #include "vk_video/vulkan_video_codec_av1std.h" @@ -11699,6 +11904,74 @@ VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorBufferEmbeddedSamplers2EXT( #endif +// VK_KHR_shader_relaxed_extended_instruction is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_relaxed_extended_instruction 1 +#define VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_SPEC_VERSION 1 +#define VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_EXTENSION_NAME "VK_KHR_shader_relaxed_extended_instruction" +typedef struct VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderRelaxedExtendedInstruction; +} VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR; + + + +// VK_KHR_maintenance7 is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_maintenance7 1 +#define VK_KHR_MAINTENANCE_7_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_7_EXTENSION_NAME "VK_KHR_maintenance7" + +typedef enum VkPhysicalDeviceLayeredApiKHR { + VK_PHYSICAL_DEVICE_LAYERED_API_VULKAN_KHR = 0, + VK_PHYSICAL_DEVICE_LAYERED_API_D3D12_KHR = 1, + VK_PHYSICAL_DEVICE_LAYERED_API_METAL_KHR = 2, + VK_PHYSICAL_DEVICE_LAYERED_API_OPENGL_KHR = 3, + VK_PHYSICAL_DEVICE_LAYERED_API_OPENGLES_KHR = 4, + VK_PHYSICAL_DEVICE_LAYERED_API_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPhysicalDeviceLayeredApiKHR; +typedef struct VkPhysicalDeviceMaintenance7FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 maintenance7; +} VkPhysicalDeviceMaintenance7FeaturesKHR; + +typedef struct VkPhysicalDeviceMaintenance7PropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 robustFragmentShadingRateAttachmentAccess; + VkBool32 separateDepthStencilAttachmentAccess; + uint32_t maxDescriptorSetTotalUniformBuffersDynamic; + uint32_t maxDescriptorSetTotalStorageBuffersDynamic; + uint32_t maxDescriptorSetTotalBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalBuffersDynamic; +} VkPhysicalDeviceMaintenance7PropertiesKHR; + +typedef struct VkPhysicalDeviceLayeredApiPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceLayeredApiKHR layeredAPI; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; +} VkPhysicalDeviceLayeredApiPropertiesKHR; + +typedef struct VkPhysicalDeviceLayeredApiPropertiesListKHR { + VkStructureType sType; + void* pNext; + uint32_t layeredApiCount; + VkPhysicalDeviceLayeredApiPropertiesKHR* pLayeredApis; +} VkPhysicalDeviceLayeredApiPropertiesListKHR; + +typedef struct VkPhysicalDeviceLayeredApiVulkanPropertiesKHR { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties2 properties; +} VkPhysicalDeviceLayeredApiVulkanPropertiesKHR; + + + // VK_EXT_debug_report is a preprocessor guard. Do not pass it to API calls. 
#define VK_EXT_debug_report 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) @@ -11747,7 +12020,9 @@ typedef enum VkDebugReportObjectTypeEXT { VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_MODULE_NV_EXT = 1000307000, VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_FUNCTION_NV_EXT = 1000307001, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT = 1000366000, + // VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT is a deprecated alias VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + // VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT is a deprecated alias VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, @@ -12519,6 +12794,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( typedef enum VkSurfaceCounterFlagBitsEXT { VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, + // VK_SURFACE_COUNTER_VBLANK_EXT is a deprecated alias VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT, VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF } VkSurfaceCounterFlagBitsEXT; @@ -12687,7 +12963,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( #define VK_NV_viewport_array2 1 #define VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION 1 #define VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME "VK_NV_viewport_array2" +// VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION is a deprecated alias #define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION +// VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME is a deprecated alias #define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME @@ -12842,13 +13120,13 @@ typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { // VK_EXT_swapchain_colorspace is a preprocessor guard. Do not pass it to API calls. #define VK_EXT_swapchain_colorspace 1 -#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 5 #define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" // VK_EXT_hdr_metadata is a preprocessor guard. Do not pass it to API calls. 
#define VK_EXT_hdr_metadata 1 -#define VK_EXT_HDR_METADATA_SPEC_VERSION 2 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 3 #define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" typedef struct VkXYColorEXT { float x; @@ -14145,12 +14423,7 @@ typedef VkPipelineCreationFeedback VkPipelineCreationFeedbackEXT; #define VK_NV_compute_shader_derivatives 1 #define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 #define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" -typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { - VkStructureType sType; - void* pNext; - VkBool32 computeDerivativeGroupQuads; - VkBool32 computeDerivativeGroupLinear; -} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; +typedef VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; @@ -14535,7 +14808,9 @@ typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLay #define VK_GOOGLE_hlsl_functionality1 1 #define VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION 1 #define VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" +// VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION is a deprecated alias #define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION +// VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME is a deprecated alias #define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME @@ -16052,14 +16327,14 @@ typedef struct VkDescriptorAddressInfoEXT { typedef struct VkDescriptorBufferBindingInfoEXT { VkStructureType sType; - void* pNext; + const void* pNext; VkDeviceAddress address; VkBufferUsageFlags usage; } VkDescriptorBufferBindingInfoEXT; typedef struct VkDescriptorBufferBindingPushDescriptorBufferHandleEXT { VkStructureType sType; - void* pNext; + const void* pNext; VkBuffer buffer; } VkDescriptorBufferBindingPushDescriptorBufferHandleEXT; @@ -17691,7 +17966,7 @@ typedef struct VkRenderPassStripeSubmitInfoARM { // VK_QCOM_fragment_density_map_offset is a preprocessor guard. Do not pass it to API calls. #define VK_QCOM_fragment_density_map_offset 1 -#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 1 +#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 2 #define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME "VK_QCOM_fragment_density_map_offset" typedef struct VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM { VkStructureType sType; @@ -18502,7 +18777,7 @@ VKAPI_ATTR void VKAPI_CALL vkCmdOpticalFlowExecuteNV( // VK_EXT_legacy_dithering is a preprocessor guard. Do not pass it to API calls. #define VK_EXT_legacy_dithering 1 -#define VK_EXT_LEGACY_DITHERING_SPEC_VERSION 1 +#define VK_EXT_LEGACY_DITHERING_SPEC_VERSION 2 #define VK_EXT_LEGACY_DITHERING_EXTENSION_NAME "VK_EXT_legacy_dithering" typedef struct VkPhysicalDeviceLegacyDitheringFeaturesEXT { VkStructureType sType; @@ -18524,6 +18799,53 @@ typedef struct VkPhysicalDevicePipelineProtectedAccessFeaturesEXT { +// VK_AMD_anti_lag is a preprocessor guard. Do not pass it to API calls. 
+#define VK_AMD_anti_lag 1 +#define VK_AMD_ANTI_LAG_SPEC_VERSION 1 +#define VK_AMD_ANTI_LAG_EXTENSION_NAME "VK_AMD_anti_lag" + +typedef enum VkAntiLagModeAMD { + VK_ANTI_LAG_MODE_DRIVER_CONTROL_AMD = 0, + VK_ANTI_LAG_MODE_ON_AMD = 1, + VK_ANTI_LAG_MODE_OFF_AMD = 2, + VK_ANTI_LAG_MODE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkAntiLagModeAMD; + +typedef enum VkAntiLagStageAMD { + VK_ANTI_LAG_STAGE_INPUT_AMD = 0, + VK_ANTI_LAG_STAGE_PRESENT_AMD = 1, + VK_ANTI_LAG_STAGE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkAntiLagStageAMD; +typedef struct VkPhysicalDeviceAntiLagFeaturesAMD { + VkStructureType sType; + void* pNext; + VkBool32 antiLag; +} VkPhysicalDeviceAntiLagFeaturesAMD; + +typedef struct VkAntiLagPresentationInfoAMD { + VkStructureType sType; + void* pNext; + VkAntiLagStageAMD stage; + uint64_t frameIndex; +} VkAntiLagPresentationInfoAMD; + +typedef struct VkAntiLagDataAMD { + VkStructureType sType; + const void* pNext; + VkAntiLagModeAMD mode; + uint32_t maxFPS; + const VkAntiLagPresentationInfoAMD* pPresentationInfo; +} VkAntiLagDataAMD; + +typedef void (VKAPI_PTR *PFN_vkAntiLagUpdateAMD)(VkDevice device, const VkAntiLagDataAMD* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkAntiLagUpdateAMD( + VkDevice device, + const VkAntiLagDataAMD* pData); +#endif + + // VK_EXT_shader_object is a preprocessor guard. Do not pass it to API calls. #define VK_EXT_shader_object 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderEXT) @@ -18727,6 +19049,24 @@ typedef struct VkPhysicalDeviceExtendedSparseAddressSpacePropertiesNV { #define VK_EXT_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_EXT_mutable_descriptor_type" +// VK_EXT_legacy_vertex_attributes is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_legacy_vertex_attributes 1 +#define VK_EXT_LEGACY_VERTEX_ATTRIBUTES_SPEC_VERSION 1 +#define VK_EXT_LEGACY_VERTEX_ATTRIBUTES_EXTENSION_NAME "VK_EXT_legacy_vertex_attributes" +typedef struct VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 legacyVertexAttributes; +} VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT; + +typedef struct VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 nativeUnalignedPerformance; +} VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT; + + + // VK_EXT_layer_settings is a preprocessor guard. Do not pass it to API calls. #define VK_EXT_layer_settings 1 #define VK_EXT_LAYER_SETTINGS_SPEC_VERSION 2 @@ -19123,6 +19463,18 @@ typedef struct VkPhysicalDeviceRawAccessChainsFeaturesNV { +// VK_NV_command_buffer_inheritance is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_command_buffer_inheritance 1 +#define VK_NV_COMMAND_BUFFER_INHERITANCE_SPEC_VERSION 1 +#define VK_NV_COMMAND_BUFFER_INHERITANCE_EXTENSION_NAME "VK_NV_command_buffer_inheritance" +typedef struct VkPhysicalDeviceCommandBufferInheritanceFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 commandBufferInheritance; +} VkPhysicalDeviceCommandBufferInheritanceFeaturesNV; + + + // VK_NV_shader_atomic_float16_vector is a preprocessor guard. Do not pass it to API calls. #define VK_NV_shader_atomic_float16_vector 1 #define VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_SPEC_VERSION 1 @@ -19135,6 +19487,18 @@ typedef struct VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV { +// VK_EXT_shader_replicated_composites is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_shader_replicated_composites 1 +#define VK_EXT_SHADER_REPLICATED_COMPOSITES_SPEC_VERSION 1 +#define VK_EXT_SHADER_REPLICATED_COMPOSITES_EXTENSION_NAME "VK_EXT_shader_replicated_composites" +typedef struct VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderReplicatedComposites; +} VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT; + + + // VK_NV_ray_tracing_validation is a preprocessor guard. Do not pass it to API calls. #define VK_NV_ray_tracing_validation 1 #define VK_NV_RAY_TRACING_VALIDATION_SPEC_VERSION 1 @@ -19147,6 +19511,30 @@ typedef struct VkPhysicalDeviceRayTracingValidationFeaturesNV { +// VK_MESA_image_alignment_control is a preprocessor guard. Do not pass it to API calls. +#define VK_MESA_image_alignment_control 1 +#define VK_MESA_IMAGE_ALIGNMENT_CONTROL_SPEC_VERSION 1 +#define VK_MESA_IMAGE_ALIGNMENT_CONTROL_EXTENSION_NAME "VK_MESA_image_alignment_control" +typedef struct VkPhysicalDeviceImageAlignmentControlFeaturesMESA { + VkStructureType sType; + void* pNext; + VkBool32 imageAlignmentControl; +} VkPhysicalDeviceImageAlignmentControlFeaturesMESA; + +typedef struct VkPhysicalDeviceImageAlignmentControlPropertiesMESA { + VkStructureType sType; + void* pNext; + uint32_t supportedImageAlignmentMask; +} VkPhysicalDeviceImageAlignmentControlPropertiesMESA; + +typedef struct VkImageAlignmentControlCreateInfoMESA { + VkStructureType sType; + const void* pNext; + uint32_t maximumRequestedAlignment; +} VkImageAlignmentControlCreateInfoMESA; + + + // VK_KHR_acceleration_structure is a preprocessor guard. Do not pass it to API calls. #define VK_KHR_acceleration_structure 1 #define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 13 diff --git a/third_party/vulkan/vulkan_enums.hpp b/third_party/vulkan/vulkan_enums.hpp index 222a796..fe4f89d 100644 --- a/third_party/vulkan/vulkan_enums.hpp +++ b/third_party/vulkan/vulkan_enums.hpp @@ -8,6 +8,9 @@ #ifndef VULKAN_ENUMS_HPP #define VULKAN_ENUMS_HPP +// include-what-you-use: make sure, vulkan.hpp is used by code-completers +// IWYU pragma: private; include "vulkan.hpp" + namespace VULKAN_HPP_NAMESPACE { template @@ -281,7 +284,9 @@ namespace VULKAN_HPP_NAMESPACE eErrorInvalidVideoStdParametersKHR = VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR, eErrorCompressionExhaustedEXT = VK_ERROR_COMPRESSION_EXHAUSTED_EXT, eIncompatibleShaderBinaryEXT = VK_INCOMPATIBLE_SHADER_BINARY_EXT, - eErrorIncompatibleShaderBinaryEXT = VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT + eErrorIncompatibleShaderBinaryEXT = VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT, + ePipelineBinaryMissingKHR = VK_PIPELINE_BINARY_MISSING_KHR, + eErrorNotEnoughSpaceKHR = VK_ERROR_NOT_ENOUGH_SPACE_KHR }; enum class StructureType @@ -940,15 +945,11 @@ namespace VULKAN_HPP_NAMESPACE ePipelineRepresentativeFragmentTestStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV, ePhysicalDeviceImageViewImageFormatInfoEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT, eFilterCubicImageViewImageFormatPropertiesEXT = VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT, - eDeviceQueueGlobalPriorityCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, - eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, eImportMemoryHostPointerInfoEXT = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT, eMemoryHostPointerPropertiesEXT = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT, 
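/*
 * A minimal sketch of probing the new feature structs from the vulkan.h hunks
 * above through the standard vkGetPhysicalDeviceFeatures2 chain (assumes a
 * Vulkan 1.1+ `physicalDevice`; local names are illustrative):
 *
 *   VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT replicated = {};
 *   replicated.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT;
 *
 *   VkPhysicalDeviceImageAlignmentControlFeaturesMESA alignmentControl = {};
 *   alignmentControl.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA;
 *   alignmentControl.pNext = &replicated;
 *
 *   VkPhysicalDeviceFeatures2 features2 = {};
 *   features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
 *   features2.pNext = &alignmentControl;
 *   vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
 *   // replicated.shaderReplicatedComposites and alignmentControl.imageAlignmentControl
 *   // now report support; the matching sType values are added further down in this diff.
 */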
ePhysicalDeviceExternalMemoryHostPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT, ePhysicalDeviceShaderClockFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR, ePipelineCompilerControlCreateInfoAMD = VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD, - eCalibratedTimestampInfoKHR = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, - eCalibratedTimestampInfoEXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, ePhysicalDeviceShaderCorePropertiesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD, eVideoDecodeH265CapabilitiesKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_KHR, eVideoDecodeH265SessionParametersCreateInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR, @@ -956,24 +957,19 @@ namespace VULKAN_HPP_NAMESPACE eVideoDecodeH265ProfileInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR, eVideoDecodeH265PictureInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR, eVideoDecodeH265DpbSlotInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR, + eDeviceQueueGlobalPriorityCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, + eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, ePhysicalDeviceGlobalPriorityQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR, ePhysicalDeviceGlobalPriorityQueryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT, eQueueFamilyGlobalPriorityPropertiesKHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR, eQueueFamilyGlobalPriorityPropertiesEXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT, eDeviceMemoryOverallocationCreateInfoAMD = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD, ePhysicalDeviceVertexAttributeDivisorPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT, - ePipelineVertexInputDivisorStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR, - ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, - ePhysicalDeviceVertexAttributeDivisorFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR, - ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, #if defined( VK_USE_PLATFORM_GGP ) ePresentFrameTokenGGP = VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP, #endif /*VK_USE_PLATFORM_GGP*/ - ePhysicalDeviceComputeShaderDerivativesFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV, ePhysicalDeviceMeshShaderFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV, ePhysicalDeviceMeshShaderPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV, - ePhysicalDeviceFragmentShaderBarycentricFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR, - ePhysicalDeviceFragmentShaderBarycentricFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV, ePhysicalDeviceShaderImageFootprintFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV, ePipelineViewportExclusiveScissorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV, ePhysicalDeviceExclusiveScissorFeaturesNV = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV, @@ -1038,15 +1034,7 @@ namespace VULKAN_HPP_NAMESPACE eSurfaceFullScreenExclusiveWin32InfoEXT = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT, #endif /*VK_USE_PLATFORM_WIN32_KHR*/ eHeadlessSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT, - ePhysicalDeviceLineRasterizationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR, - ePhysicalDeviceLineRasterizationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, - ePipelineRasterizationLineStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR, - ePipelineRasterizationLineStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, - ePhysicalDeviceLineRasterizationPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR, - ePhysicalDeviceLineRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, ePhysicalDeviceShaderAtomicFloatFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT, - ePhysicalDeviceIndexTypeUint8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR, - ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, ePhysicalDeviceExtendedDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT, ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR, ePipelineInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR, @@ -1145,62 +1133,54 @@ namespace VULKAN_HPP_NAMESPACE eExportMetalSharedEventInfoEXT = VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT, eImportMetalSharedEventInfoEXT = VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT, #endif /*VK_USE_PLATFORM_METAL_EXT*/ - eQueueFamilyCheckpointProperties2NV = VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV, - eCheckpointData2NV = VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV, - ePhysicalDeviceDescriptorBufferPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT, - ePhysicalDeviceDescriptorBufferDensityMapPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_DENSITY_MAP_PROPERTIES_EXT, - ePhysicalDeviceDescriptorBufferFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT, - eDescriptorAddressInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT, - eDescriptorGetInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT, - eBufferCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_BUFFER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, - eImageCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_IMAGE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, - eImageViewCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, - eSamplerCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, - eOpaqueCaptureDescriptorDataCreateInfoEXT = VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DESCRIPTOR_DATA_CREATE_INFO_EXT, - eDescriptorBufferBindingInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT, - eDescriptorBufferBindingPushDescriptorBufferHandleEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_PUSH_DESCRIPTOR_BUFFER_HANDLE_EXT, - eAccelerationStructureCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, - 
ePhysicalDeviceGraphicsPipelineLibraryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT, - ePhysicalDeviceGraphicsPipelineLibraryPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT, - eGraphicsPipelineLibraryCreateInfoEXT = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT, - ePhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD, - ePhysicalDeviceFragmentShaderBarycentricPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR, - ePhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR, - ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV, - ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV, - ePipelineFragmentShadingRateEnumStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV, - eAccelerationStructureGeometryMotionTrianglesDataNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV, - ePhysicalDeviceRayTracingMotionBlurFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV, - eAccelerationStructureMotionInfoNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV, - ePhysicalDeviceMeshShaderFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT, - ePhysicalDeviceMeshShaderPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT, - ePhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT, - ePhysicalDeviceFragmentDensityMap2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT, - ePhysicalDeviceFragmentDensityMap2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT, - eCopyCommandTransformInfoQCOM = VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM, - ePhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR, - ePhysicalDeviceImageCompressionControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT, - eImageCompressionControlEXT = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT, - eSubresourceLayout2KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR, - eSubresourceLayout2EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT, - eImageSubresource2KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR, - eImageSubresource2EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT, - eImageCompressionPropertiesEXT = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT, - ePhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT, - ePhysicalDevice4444FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT, - ePhysicalDeviceFaultFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT, - eDeviceFaultCountsEXT = VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT, - eDeviceFaultInfoEXT = VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT, - ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT, - 
ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM, - ePhysicalDeviceRgba10X6FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT, + eQueueFamilyCheckpointProperties2NV = VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV, + eCheckpointData2NV = VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV, + ePhysicalDeviceDescriptorBufferPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT, + ePhysicalDeviceDescriptorBufferDensityMapPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_DENSITY_MAP_PROPERTIES_EXT, + ePhysicalDeviceDescriptorBufferFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT, + eDescriptorAddressInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT, + eDescriptorGetInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT, + eBufferCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_BUFFER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, + eImageCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_IMAGE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, + eImageViewCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, + eSamplerCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, + eOpaqueCaptureDescriptorDataCreateInfoEXT = VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DESCRIPTOR_DATA_CREATE_INFO_EXT, + eDescriptorBufferBindingInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT, + eDescriptorBufferBindingPushDescriptorBufferHandleEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_PUSH_DESCRIPTOR_BUFFER_HANDLE_EXT, + eAccelerationStructureCaptureDescriptorDataInfoEXT = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT, + ePhysicalDeviceGraphicsPipelineLibraryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT, + ePhysicalDeviceGraphicsPipelineLibraryPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT, + eGraphicsPipelineLibraryCreateInfoEXT = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT, + ePhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD, + ePhysicalDeviceFragmentShaderBarycentricFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR, + ePhysicalDeviceFragmentShaderBarycentricFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV, + ePhysicalDeviceFragmentShaderBarycentricPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR, + ePhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR, + ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV, + ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV, + ePipelineFragmentShadingRateEnumStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV, + eAccelerationStructureGeometryMotionTrianglesDataNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV, + ePhysicalDeviceRayTracingMotionBlurFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV, + eAccelerationStructureMotionInfoNV = 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV, + ePhysicalDeviceMeshShaderFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT, + ePhysicalDeviceMeshShaderPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT, + ePhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT, + ePhysicalDeviceFragmentDensityMap2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT, + ePhysicalDeviceFragmentDensityMap2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT, + eCopyCommandTransformInfoQCOM = VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM, + ePhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR, + ePhysicalDeviceImageCompressionControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT, + eImageCompressionControlEXT = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT, + eImageCompressionPropertiesEXT = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT, + ePhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT, + ePhysicalDevice4444FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT, + ePhysicalDeviceFaultFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT, + eDeviceFaultCountsEXT = VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT, + eDeviceFaultInfoEXT = VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT, + ePhysicalDeviceRgba10X6FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT, #if defined( VK_USE_PLATFORM_DIRECTFB_EXT ) eDirectfbSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT, #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - ePhysicalDeviceMutableDescriptorTypeFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT, - ePhysicalDeviceMutableDescriptorTypeFeaturesVALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE, - eMutableDescriptorTypeCreateInfoEXT = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT, - eMutableDescriptorTypeCreateInfoVALVE = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE, ePhysicalDeviceVertexInputDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT, eVertexInputBindingDescription2EXT = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT, eVertexInputAttributeDescription2EXT = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT, @@ -1270,69 +1250,71 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceDisplacementMicromapPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV, eAccelerationStructureTrianglesDisplacementMicromapNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV, #endif /*VK_ENABLE_BETA_EXTENSIONS*/ - ePhysicalDeviceClusterCullingShaderFeaturesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI, - ePhysicalDeviceClusterCullingShaderPropertiesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI, - ePhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_VRS_FEATURES_HUAWEI, - ePhysicalDeviceBorderColorSwizzleFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT, - 
eSamplerBorderColorComponentMappingCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT, - ePhysicalDevicePageableDeviceLocalMemoryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT, - ePhysicalDeviceShaderCorePropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM, - ePhysicalDeviceShaderSubgroupRotateFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR, - eDeviceQueueShaderCoreControlCreateInfoARM = VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM, - ePhysicalDeviceSchedulingControlsFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM, - ePhysicalDeviceSchedulingControlsPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM, - ePhysicalDeviceImageSlicedViewOf3DFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT, - eImageViewSlicedCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT, - ePhysicalDeviceDescriptorSetHostMappingFeaturesVALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE, - eDescriptorSetBindingReferenceVALVE = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE, - eDescriptorSetLayoutHostMappingInfoVALVE = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE, - ePhysicalDeviceDepthClampZeroOneFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT, - ePhysicalDeviceNonSeamlessCubeMapFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT, - ePhysicalDeviceRenderPassStripedFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM, - ePhysicalDeviceRenderPassStripedPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_PROPERTIES_ARM, - eRenderPassStripeBeginInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM, - eRenderPassStripeInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM, - eRenderPassStripeSubmitInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_SUBMIT_INFO_ARM, - ePhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM, - ePhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM, - eSubpassFragmentDensityMapOffsetEndInfoQCOM = VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM, - ePhysicalDeviceCopyMemoryIndirectFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV, - ePhysicalDeviceCopyMemoryIndirectPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV, - ePhysicalDeviceMemoryDecompressionFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV, - ePhysicalDeviceMemoryDecompressionPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_NV, - ePhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV, - eComputePipelineIndirectBufferInfoNV = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV, - ePipelineIndirectDeviceAddressInfoNV = VK_STRUCTURE_TYPE_PIPELINE_INDIRECT_DEVICE_ADDRESS_INFO_NV, - ePhysicalDeviceLinearColorAttachmentFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV, - ePhysicalDeviceShaderMaximalReconvergenceFeaturesKHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR, - ePhysicalDeviceImageCompressionControlSwapchainFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT, - ePhysicalDeviceImageProcessingFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM, - ePhysicalDeviceImageProcessingPropertiesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM, - eImageViewSampleWeightCreateInfoQCOM = VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM, - ePhysicalDeviceNestedCommandBufferFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT, - ePhysicalDeviceNestedCommandBufferPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_PROPERTIES_EXT, - eExternalMemoryAcquireUnmodifiedEXT = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT, - ePhysicalDeviceExtendedDynamicState3FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT, - ePhysicalDeviceExtendedDynamicState3PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_PROPERTIES_EXT, - ePhysicalDeviceSubpassMergeFeedbackFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT, - eRenderPassCreationControlEXT = VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT, - eRenderPassCreationFeedbackCreateInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT, - eRenderPassSubpassFeedbackCreateInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT, - eDirectDriverLoadingInfoLUNARG = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG, - eDirectDriverLoadingListLUNARG = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG, - ePhysicalDeviceShaderModuleIdentifierFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT, - ePhysicalDeviceShaderModuleIdentifierPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT, - ePipelineShaderStageModuleIdentifierCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT, - eShaderModuleIdentifierEXT = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT, - ePhysicalDeviceOpticalFlowFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV, - ePhysicalDeviceOpticalFlowPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_PROPERTIES_NV, - eOpticalFlowImageFormatInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_INFO_NV, - eOpticalFlowImageFormatPropertiesNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_PROPERTIES_NV, - eOpticalFlowSessionCreateInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_INFO_NV, - eOpticalFlowExecuteInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV, - eOpticalFlowSessionCreatePrivateDataInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV, - ePhysicalDeviceLegacyDitheringFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT, - ePhysicalDevicePipelineProtectedAccessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT, + ePhysicalDeviceClusterCullingShaderFeaturesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI, + ePhysicalDeviceClusterCullingShaderPropertiesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI, + ePhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_VRS_FEATURES_HUAWEI, + 
ePhysicalDeviceBorderColorSwizzleFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT, + eSamplerBorderColorComponentMappingCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT, + ePhysicalDevicePageableDeviceLocalMemoryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT, + ePhysicalDeviceShaderCorePropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM, + ePhysicalDeviceShaderSubgroupRotateFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR, + eDeviceQueueShaderCoreControlCreateInfoARM = VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM, + ePhysicalDeviceSchedulingControlsFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM, + ePhysicalDeviceSchedulingControlsPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM, + ePhysicalDeviceImageSlicedViewOf3DFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT, + eImageViewSlicedCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT, + ePhysicalDeviceDescriptorSetHostMappingFeaturesVALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE, + eDescriptorSetBindingReferenceVALVE = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE, + eDescriptorSetLayoutHostMappingInfoVALVE = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE, + ePhysicalDeviceDepthClampZeroOneFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT, + ePhysicalDeviceNonSeamlessCubeMapFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT, + ePhysicalDeviceRenderPassStripedFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM, + ePhysicalDeviceRenderPassStripedPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_PROPERTIES_ARM, + eRenderPassStripeBeginInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM, + eRenderPassStripeInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM, + eRenderPassStripeSubmitInfoARM = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_SUBMIT_INFO_ARM, + ePhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM, + ePhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM, + eSubpassFragmentDensityMapOffsetEndInfoQCOM = VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM, + ePhysicalDeviceCopyMemoryIndirectFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV, + ePhysicalDeviceCopyMemoryIndirectPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV, + ePhysicalDeviceMemoryDecompressionFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV, + ePhysicalDeviceMemoryDecompressionPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_NV, + ePhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV, + eComputePipelineIndirectBufferInfoNV = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV, + ePipelineIndirectDeviceAddressInfoNV = VK_STRUCTURE_TYPE_PIPELINE_INDIRECT_DEVICE_ADDRESS_INFO_NV, + ePhysicalDeviceLinearColorAttachmentFeaturesNV = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV, + ePhysicalDeviceShaderMaximalReconvergenceFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR, + ePhysicalDeviceImageCompressionControlSwapchainFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT, + ePhysicalDeviceImageProcessingFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM, + ePhysicalDeviceImageProcessingPropertiesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM, + eImageViewSampleWeightCreateInfoQCOM = VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM, + ePhysicalDeviceNestedCommandBufferFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT, + ePhysicalDeviceNestedCommandBufferPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_PROPERTIES_EXT, + eExternalMemoryAcquireUnmodifiedEXT = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT, + ePhysicalDeviceExtendedDynamicState3FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT, + ePhysicalDeviceExtendedDynamicState3PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_PROPERTIES_EXT, + ePhysicalDeviceSubpassMergeFeedbackFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT, + eRenderPassCreationControlEXT = VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT, + eRenderPassCreationFeedbackCreateInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT, + eRenderPassSubpassFeedbackCreateInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT, + eDirectDriverLoadingInfoLUNARG = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG, + eDirectDriverLoadingListLUNARG = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG, + ePhysicalDeviceShaderModuleIdentifierFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT, + ePhysicalDeviceShaderModuleIdentifierPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT, + ePipelineShaderStageModuleIdentifierCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT, + eShaderModuleIdentifierEXT = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT, + ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT, + ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM, + ePhysicalDeviceOpticalFlowFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV, + ePhysicalDeviceOpticalFlowPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_PROPERTIES_NV, + eOpticalFlowImageFormatInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_INFO_NV, + eOpticalFlowImageFormatPropertiesNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_PROPERTIES_NV, + eOpticalFlowSessionCreateInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_INFO_NV, + eOpticalFlowExecuteInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV, + eOpticalFlowSessionCreatePrivateDataInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV, + ePhysicalDeviceLegacyDitheringFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT, + ePhysicalDevicePipelineProtectedAccessFeaturesEXT = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT, #if defined( VK_USE_PLATFORM_ANDROID_KHR ) ePhysicalDeviceExternalFormatResolveFeaturesANDROID = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID, ePhysicalDeviceExternalFormatResolvePropertiesANDROID = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID, @@ -1342,12 +1324,29 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceMaintenance5PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR, eRenderingAreaInfoKHR = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR, eDeviceImageSubresourceInfoKHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR, + eSubresourceLayout2KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR, + eSubresourceLayout2EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT, + eImageSubresource2KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR, + eImageSubresource2EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT, ePipelineCreateFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR, eBufferUsageFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR, + ePhysicalDeviceAntiLagFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD, + eAntiLagDataAMD = VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD, + eAntiLagPresentationInfoAMD = VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD, ePhysicalDeviceRayTracingPositionFetchFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR, ePhysicalDeviceShaderObjectFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT, ePhysicalDeviceShaderObjectPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT, eShaderCreateInfoEXT = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT, + ePhysicalDevicePipelineBinaryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_FEATURES_KHR, + ePipelineBinaryCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_BINARY_CREATE_INFO_KHR, + ePipelineBinaryInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_BINARY_INFO_KHR, + ePipelineBinaryKeyKHR = VK_STRUCTURE_TYPE_PIPELINE_BINARY_KEY_KHR, + ePhysicalDevicePipelineBinaryPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_PROPERTIES_KHR, + eReleaseCapturedPipelineDataInfoKHR = VK_STRUCTURE_TYPE_RELEASE_CAPTURED_PIPELINE_DATA_INFO_KHR, + ePipelineBinaryDataInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_BINARY_DATA_INFO_KHR, + ePipelineCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_INFO_KHR, + eDevicePipelineBinaryInternalCacheControlKHR = VK_STRUCTURE_TYPE_DEVICE_PIPELINE_BINARY_INTERNAL_CACHE_CONTROL_KHR, + ePipelineBinaryHandlesInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_BINARY_HANDLES_INFO_KHR, ePhysicalDeviceTilePropertiesFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM, eTilePropertiesQCOM = VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM, ePhysicalDeviceAmigoProfilingFeaturesSEC = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC, @@ -1357,6 +1356,12 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceRayTracingInvocationReorderPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_PROPERTIES_NV, ePhysicalDeviceExtendedSparseAddressSpaceFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV, ePhysicalDeviceExtendedSparseAddressSpacePropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_PROPERTIES_NV, + ePhysicalDeviceMutableDescriptorTypeFeaturesEXT = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT, + ePhysicalDeviceMutableDescriptorTypeFeaturesVALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE, + eMutableDescriptorTypeCreateInfoEXT = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT, + eMutableDescriptorTypeCreateInfoVALVE = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE, + ePhysicalDeviceLegacyVertexAttributesFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_FEATURES_EXT, + ePhysicalDeviceLegacyVertexAttributesPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_PROPERTIES_EXT, eLayerSettingsCreateInfoEXT = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT, ePhysicalDeviceShaderCoreBuiltinsFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM, ePhysicalDeviceShaderCoreBuiltinsPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM, @@ -1376,6 +1381,9 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceCooperativeMatrixPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_KHR, ePhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM, eMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM = VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM, + ePhysicalDeviceComputeShaderDerivativesFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR, + ePhysicalDeviceComputeShaderDerivativesFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV, + ePhysicalDeviceComputeShaderDerivativesPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_PROPERTIES_KHR, eVideoDecodeAv1CapabilitiesKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_CAPABILITIES_KHR, eVideoDecodeAv1PictureInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PICTURE_INFO_KHR, eVideoDecodeAv1ProfileInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PROFILE_INFO_KHR, @@ -1395,6 +1403,10 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceCubicClampFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM, ePhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT, ePhysicalDeviceVertexAttributeDivisorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR, + ePipelineVertexInputDivisorStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR, + ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, + ePhysicalDeviceVertexAttributeDivisorFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR, + ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, ePhysicalDeviceShaderFloatControls2FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR, #if defined( VK_USE_PLATFORM_SCREEN_QNX ) eScreenBufferPropertiesQNX = VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX, @@ -1403,21 +1415,42 @@ namespace VULKAN_HPP_NAMESPACE eExternalFormatQNX = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_QNX, ePhysicalDeviceExternalMemoryScreenBufferFeaturesQNX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX, #endif /*VK_USE_PLATFORM_SCREEN_QNX*/ - 
ePhysicalDeviceLayeredDriverPropertiesMSFT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT, - ePhysicalDeviceShaderExpectAssumeFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR, - ePhysicalDeviceMaintenance6FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR, - ePhysicalDeviceMaintenance6PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR, - eBindMemoryStatusKHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR, - eBindDescriptorSetsInfoKHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR, - ePushConstantsInfoKHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR, - ePushDescriptorSetInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR, - ePushDescriptorSetWithTemplateInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR, - eSetDescriptorBufferOffsetsInfoEXT = VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT, - eBindDescriptorBufferEmbeddedSamplersInfoEXT = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT, - ePhysicalDeviceDescriptorPoolOverallocationFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV, - ePhysicalDeviceRawAccessChainsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV, - ePhysicalDeviceShaderAtomicFloat16VectorFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV, - ePhysicalDeviceRayTracingValidationFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV + ePhysicalDeviceLayeredDriverPropertiesMSFT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT, + ePhysicalDeviceIndexTypeUint8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR, + ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, + ePhysicalDeviceLineRasterizationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR, + ePhysicalDeviceLineRasterizationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, + ePipelineRasterizationLineStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR, + ePipelineRasterizationLineStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, + ePhysicalDeviceLineRasterizationPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR, + ePhysicalDeviceLineRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, + eCalibratedTimestampInfoKHR = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, + eCalibratedTimestampInfoEXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, + ePhysicalDeviceShaderExpectAssumeFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR, + ePhysicalDeviceMaintenance6FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR, + ePhysicalDeviceMaintenance6PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR, + eBindMemoryStatusKHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR, + eBindDescriptorSetsInfoKHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR, + ePushConstantsInfoKHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR, + ePushDescriptorSetInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR, + ePushDescriptorSetWithTemplateInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR, + eSetDescriptorBufferOffsetsInfoEXT = 
VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT, + eBindDescriptorBufferEmbeddedSamplersInfoEXT = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT, + ePhysicalDeviceDescriptorPoolOverallocationFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV, + ePhysicalDeviceRawAccessChainsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV, + ePhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_RELAXED_EXTENDED_INSTRUCTION_FEATURES_KHR, + ePhysicalDeviceCommandBufferInheritanceFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMMAND_BUFFER_INHERITANCE_FEATURES_NV, + ePhysicalDeviceMaintenance7FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_FEATURES_KHR, + ePhysicalDeviceMaintenance7PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_PROPERTIES_KHR, + ePhysicalDeviceLayeredApiPropertiesListKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_LIST_KHR, + ePhysicalDeviceLayeredApiPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_KHR, + ePhysicalDeviceLayeredApiVulkanPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_VULKAN_PROPERTIES_KHR, + ePhysicalDeviceShaderAtomicFloat16VectorFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV, + ePhysicalDeviceShaderReplicatedCompositesFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT, + ePhysicalDeviceRayTracingValidationFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV, + ePhysicalDeviceImageAlignmentControlFeaturesMESA = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA, + ePhysicalDeviceImageAlignmentControlPropertiesMESA = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_PROPERTIES_MESA, + eImageAlignmentControlCreateInfoMESA = VK_STRUCTURE_TYPE_IMAGE_ALIGNMENT_CONTROL_CREATE_INFO_MESA }; enum class PipelineCacheHeaderVersion @@ -1484,11 +1517,13 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VK_USE_PLATFORM_FUCHSIA*/ eMicromapEXT = VK_OBJECT_TYPE_MICROMAP_EXT, eOpticalFlowSessionNV = VK_OBJECT_TYPE_OPTICAL_FLOW_SESSION_NV, - eShaderEXT = VK_OBJECT_TYPE_SHADER_EXT + eShaderEXT = VK_OBJECT_TYPE_SHADER_EXT, + ePipelineBinaryKHR = VK_OBJECT_TYPE_PIPELINE_BINARY_KHR }; enum class VendorId { + eKhronos = VK_VENDOR_ID_KHRONOS, eVIV = VK_VENDOR_ID_VIV, eVSI = VK_VENDOR_ID_VSI, eKazan = VK_VENDOR_ID_KAZAN, @@ -1801,6 +1836,7 @@ namespace VULKAN_HPP_NAMESPACE ePvrtc14BppSrgbBlockIMG = VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, ePvrtc22BppSrgbBlockIMG = VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, ePvrtc24BppSrgbBlockIMG = VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, + eR16G16Sfixed5NV = VK_FORMAT_R16G16_SFIXED5_NV, eR16G16S105NV = VK_FORMAT_R16G16_S10_5_NV, eA1B5G5R5UnormPack16KHR = VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR, eA8UnormKHR = VK_FORMAT_A8_UNORM_KHR @@ -1843,11 +1879,11 @@ namespace VULKAN_HPP_NAMESPACE eCositedChromaSamplesKHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR, eSampledImageFilterMinmax = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, eSampledImageFilterMinmaxEXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT, - eSampledImageFilterCubicEXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, - eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, eVideoDecodeOutputKHR = VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR, eVideoDecodeDpbKHR = VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR, 
eAccelerationStructureVertexBufferKHR = VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR, + eSampledImageFilterCubicEXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, + eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, eFragmentDensityMapEXT = VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT, eFragmentShadingRateAttachmentKHR = VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eVideoEncodeInputKHR = VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR, @@ -1870,10 +1906,10 @@ namespace VULKAN_HPP_NAMESPACE FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter | FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit | FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable | FormatFeatureFlagBits::eDisjoint | - FormatFeatureFlagBits::eCositedChromaSamples | FormatFeatureFlagBits::eSampledImageFilterMinmax | FormatFeatureFlagBits::eSampledImageFilterCubicEXT | - FormatFeatureFlagBits::eVideoDecodeOutputKHR | FormatFeatureFlagBits::eVideoDecodeDpbKHR | FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR | - FormatFeatureFlagBits::eFragmentDensityMapEXT | FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR | FormatFeatureFlagBits::eVideoEncodeInputKHR | - FormatFeatureFlagBits::eVideoEncodeDpbKHR; + FormatFeatureFlagBits::eCositedChromaSamples | FormatFeatureFlagBits::eSampledImageFilterMinmax | FormatFeatureFlagBits::eVideoDecodeOutputKHR | + FormatFeatureFlagBits::eVideoDecodeDpbKHR | FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR | + FormatFeatureFlagBits::eSampledImageFilterCubicEXT | FormatFeatureFlagBits::eFragmentDensityMapEXT | + FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR | FormatFeatureFlagBits::eVideoEncodeInputKHR | FormatFeatureFlagBits::eVideoEncodeDpbKHR; }; enum class ImageCreateFlagBits : VkImageCreateFlags @@ -1949,9 +1985,9 @@ namespace VULKAN_HPP_NAMESPACE eVideoDecodeDstKHR = VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR, eVideoDecodeSrcKHR = VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR, eVideoDecodeDpbKHR = VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR, + eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, eFragmentShadingRateAttachmentKHR = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eShadingRateImageNV = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, - eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, eHostTransferEXT = VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT, eVideoEncodeDstKHR = VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR, eVideoEncodeSrcKHR = VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR, @@ -1972,7 +2008,7 @@ namespace VULKAN_HPP_NAMESPACE ImageUsageFlagBits::eTransferSrc | ImageUsageFlagBits::eTransferDst | ImageUsageFlagBits::eSampled | ImageUsageFlagBits::eStorage | ImageUsageFlagBits::eColorAttachment | ImageUsageFlagBits::eDepthStencilAttachment | ImageUsageFlagBits::eTransientAttachment | ImageUsageFlagBits::eInputAttachment | ImageUsageFlagBits::eVideoDecodeDstKHR | ImageUsageFlagBits::eVideoDecodeSrcKHR | - ImageUsageFlagBits::eVideoDecodeDpbKHR | ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR | ImageUsageFlagBits::eFragmentDensityMapEXT | + ImageUsageFlagBits::eVideoDecodeDpbKHR | ImageUsageFlagBits::eFragmentDensityMapEXT | ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR | ImageUsageFlagBits::eHostTransferEXT | ImageUsageFlagBits::eVideoEncodeDstKHR | ImageUsageFlagBits::eVideoEncodeSrcKHR | ImageUsageFlagBits::eVideoEncodeDpbKHR | 
ImageUsageFlagBits::eAttachmentFeedbackLoopEXT | ImageUsageFlagBits::eInvocationMaskHUAWEI | ImageUsageFlagBits::eSampleWeightQCOM | ImageUsageFlagBits::eSampleBlockMatchQCOM; @@ -2155,14 +2191,14 @@ namespace VULKAN_HPP_NAMESPACE eAccelerationStructureBuildNV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, eRayTracingShaderKHR = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, eRayTracingShaderNV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV, + eFragmentDensityProcessEXT = VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, eFragmentShadingRateAttachmentKHR = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eShadingRateImageNV = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, + eCommandPreprocessNV = VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV, eTaskShaderEXT = VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT, eTaskShaderNV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, eMeshShaderEXT = VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT, - eMeshShaderNV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, - eFragmentDensityProcessEXT = VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, - eCommandPreprocessNV = VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV + eMeshShaderNV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV }; using PipelineStageFlags = Flags; @@ -2178,9 +2214,9 @@ namespace VULKAN_HPP_NAMESPACE PipelineStageFlagBits::eColorAttachmentOutput | PipelineStageFlagBits::eComputeShader | PipelineStageFlagBits::eTransfer | PipelineStageFlagBits::eBottomOfPipe | PipelineStageFlagBits::eHost | PipelineStageFlagBits::eAllGraphics | PipelineStageFlagBits::eAllCommands | PipelineStageFlagBits::eNone | PipelineStageFlagBits::eTransformFeedbackEXT | PipelineStageFlagBits::eConditionalRenderingEXT | - PipelineStageFlagBits::eAccelerationStructureBuildKHR | PipelineStageFlagBits::eRayTracingShaderKHR | - PipelineStageFlagBits::eFragmentShadingRateAttachmentKHR | PipelineStageFlagBits::eTaskShaderEXT | PipelineStageFlagBits::eMeshShaderEXT | - PipelineStageFlagBits::eFragmentDensityProcessEXT | PipelineStageFlagBits::eCommandPreprocessNV; + PipelineStageFlagBits::eAccelerationStructureBuildKHR | PipelineStageFlagBits::eRayTracingShaderKHR | PipelineStageFlagBits::eFragmentDensityProcessEXT | + PipelineStageFlagBits::eFragmentShadingRateAttachmentKHR | PipelineStageFlagBits::eCommandPreprocessNV | PipelineStageFlagBits::eTaskShaderEXT | + PipelineStageFlagBits::eMeshShaderEXT; }; enum class MemoryMapFlagBits : VkMemoryMapFlags @@ -2523,9 +2559,9 @@ namespace VULKAN_HPP_NAMESPACE eVideoDecodeSrcKHR = VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR, eVideoDecodeDpbKHR = VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR, eSharedPresentKHR = VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, + eFragmentDensityMapOptimalEXT = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, eFragmentShadingRateAttachmentOptimalKHR = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, eShadingRateOptimalNV = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, - eFragmentDensityMapOptimalEXT = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, eRenderingLocalReadKHR = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR, eVideoEncodeDstKHR = VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR, eVideoEncodeSrcKHR = VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR, @@ -2779,8 +2815,6 @@ namespace VULKAN_HPP_NAMESPACE eExclusiveScissorEnableNV = VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_ENABLE_NV, eExclusiveScissorNV = VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, eFragmentShadingRateKHR = VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR, - eLineStippleKHR = VK_DYNAMIC_STATE_LINE_STIPPLE_KHR, - eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT, 
eVertexInputEXT = VK_DYNAMIC_STATE_VERTEX_INPUT_EXT, ePatchControlPointsEXT = VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT, eLogicOpEXT = VK_DYNAMIC_STATE_LOGIC_OP_EXT, @@ -2816,7 +2850,9 @@ namespace VULKAN_HPP_NAMESPACE eShadingRateImageEnableNV = VK_DYNAMIC_STATE_SHADING_RATE_IMAGE_ENABLE_NV, eRepresentativeFragmentTestEnableNV = VK_DYNAMIC_STATE_REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV, eCoverageReductionModeNV = VK_DYNAMIC_STATE_COVERAGE_REDUCTION_MODE_NV, - eAttachmentFeedbackLoopEnableEXT = VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT + eAttachmentFeedbackLoopEnableEXT = VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT, + eLineStippleKHR = VK_DYNAMIC_STATE_LINE_STIPPLE_KHR, + eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT }; enum class FrontFace @@ -3241,9 +3277,9 @@ namespace VULKAN_HPP_NAMESPACE ePushDescriptorKHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, eDescriptorBufferEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT, eEmbeddedImmutableSamplersEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT, + eIndirectBindableNV = VK_DESCRIPTOR_SET_LAYOUT_CREATE_INDIRECT_BINDABLE_BIT_NV, eHostOnlyPoolEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT, eHostOnlyPoolVALVE = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE, - eIndirectBindableNV = VK_DESCRIPTOR_SET_LAYOUT_CREATE_INDIRECT_BINDABLE_BIT_NV, ePerStageNV = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PER_STAGE_BIT_NV }; @@ -3256,7 +3292,7 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR DescriptorSetLayoutCreateFlags allFlags = DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool | DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR | DescriptorSetLayoutCreateFlagBits::eDescriptorBufferEXT | DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT | - DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT | DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV | + DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV | DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT | DescriptorSetLayoutCreateFlagBits::ePerStageNV; }; @@ -3277,10 +3313,10 @@ namespace VULKAN_HPP_NAMESPACE eInlineUniformBlockEXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, eAccelerationStructureKHR = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, eAccelerationStructureNV = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV, - eMutableEXT = VK_DESCRIPTOR_TYPE_MUTABLE_EXT, - eMutableVALVE = VK_DESCRIPTOR_TYPE_MUTABLE_VALVE, eSampleWeightImageQCOM = VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM, - eBlockMatchImageQCOM = VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM + eBlockMatchImageQCOM = VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM, + eMutableEXT = VK_DESCRIPTOR_TYPE_MUTABLE_EXT, + eMutableVALVE = VK_DESCRIPTOR_TYPE_MUTABLE_VALVE }; enum class DescriptorPoolResetFlagBits : VkDescriptorPoolResetFlags @@ -3326,9 +3362,9 @@ namespace VULKAN_HPP_NAMESPACE eAccelerationStructureReadNV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV, eAccelerationStructureWriteKHR = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, eAccelerationStructureWriteNV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV, + eFragmentDensityMapReadEXT = VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT, eFragmentShadingRateAttachmentReadKHR = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR, eShadingRateImageReadNV = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, - eFragmentDensityMapReadEXT = VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT, eCommandPreprocessReadNV = VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV, 
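/*
 * The DynamicState hunks above only move eLineStippleKHR/eLineStippleEXT to
 * the end of the enum; behavior is unchanged. A usage sketch
 * (vkCmdSetLineStippleKHR comes from VK_KHR_line_rasterization and is not
 * shown in this hunk; local names are illustrative):
 *
 *   VkDynamicState dynamicStates[] = { VK_DYNAMIC_STATE_LINE_STIPPLE_KHR };
 *   // ...listed in VkPipelineDynamicStateCreateInfo at pipeline creation, then:
 *   vkCmdSetLineStippleKHR(commandBuffer, 2, 0xAAAA); // factor = 2, pattern = 0xAAAA
 */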
eCommandPreprocessWriteNV = VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV }; @@ -3346,8 +3382,8 @@ namespace VULKAN_HPP_NAMESPACE AccessFlagBits::eTransferRead | AccessFlagBits::eTransferWrite | AccessFlagBits::eHostRead | AccessFlagBits::eHostWrite | AccessFlagBits::eMemoryRead | AccessFlagBits::eMemoryWrite | AccessFlagBits::eNone | AccessFlagBits::eTransformFeedbackWriteEXT | AccessFlagBits::eTransformFeedbackCounterReadEXT | AccessFlagBits::eTransformFeedbackCounterWriteEXT | AccessFlagBits::eConditionalRenderingReadEXT | AccessFlagBits::eColorAttachmentReadNoncoherentEXT | - AccessFlagBits::eAccelerationStructureReadKHR | AccessFlagBits::eAccelerationStructureWriteKHR | AccessFlagBits::eFragmentShadingRateAttachmentReadKHR | - AccessFlagBits::eFragmentDensityMapReadEXT | AccessFlagBits::eCommandPreprocessReadNV | AccessFlagBits::eCommandPreprocessWriteNV; + AccessFlagBits::eAccelerationStructureReadKHR | AccessFlagBits::eAccelerationStructureWriteKHR | AccessFlagBits::eFragmentDensityMapReadEXT | + AccessFlagBits::eFragmentShadingRateAttachmentReadKHR | AccessFlagBits::eCommandPreprocessReadNV | AccessFlagBits::eCommandPreprocessWriteNV; }; enum class AttachmentDescriptionFlagBits : VkAttachmentDescriptionFlags @@ -3564,6 +3600,51 @@ namespace VULKAN_HPP_NAMESPACE eUint8EXT = VK_INDEX_TYPE_UINT8_EXT }; + //========================= + //=== Index Type Traits === + //========================= + + template <typename T> + struct IndexTypeValue + { + }; + + template <> + struct IndexTypeValue<uint16_t> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint16; + }; + + template <> + struct CppType<IndexType, IndexType::eUint16> + { + using Type = uint16_t; + }; + + template <> + struct IndexTypeValue<uint32_t> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint32; + }; + + template <> + struct CppType<IndexType, IndexType::eUint32> + { + using Type = uint32_t; + }; + + template <> + struct IndexTypeValue<uint8_t> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint8KHR; + }; + + template <> + struct CppType<IndexType, IndexType::eUint8KHR> + { + using Type = uint8_t; + }; + enum class StencilFaceFlagBits : VkStencilFaceFlags { eFront = VK_STENCIL_FACE_FRONT_BIT, @@ -3586,6 +3667,7 @@ namespace VULKAN_HPP_NAMESPACE { eInline = VK_SUBPASS_CONTENTS_INLINE, eSecondaryCommandBuffers = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, + eInlineAndSecondaryCommandBuffersKHR = VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_KHR, eInlineAndSecondaryCommandBuffersEXT = VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_EXT }; @@ -3947,7 +4029,8 @@ namespace VULKAN_HPP_NAMESPACE eMesaDozen = VK_DRIVER_ID_MESA_DOZEN, eMesaNvk = VK_DRIVER_ID_MESA_NVK, eImaginationOpenSourceMESA = VK_DRIVER_ID_IMAGINATION_OPEN_SOURCE_MESA, - eMesaAgxv = VK_DRIVER_ID_MESA_AGXV + eMesaHoneykrisp = VK_DRIVER_ID_MESA_HONEYKRISP, + eReserved27 = VK_DRIVER_ID_RESERVED_27 }; using DriverIdKHR = DriverId; @@ -4274,8 +4357,9 @@ namespace VULKAN_HPP_NAMESPACE eContentsSecondaryCommandBuffers = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT, eSuspending = VK_RENDERING_SUSPENDING_BIT, eResuming = VK_RENDERING_RESUMING_BIT, - eContentsInlineEXT = VK_RENDERING_CONTENTS_INLINE_BIT_EXT, - eEnableLegacyDitheringEXT = VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT + eEnableLegacyDitheringEXT = VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT, + eContentsInlineKHR = VK_RENDERING_CONTENTS_INLINE_BIT_KHR, + eContentsInlineEXT = VK_RENDERING_CONTENTS_INLINE_BIT_EXT }; using RenderingFlagBitsKHR = RenderingFlagBits; @@ -4287,8 +4371,8 @@ namespace VULKAN_HPP_NAMESPACE { static
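The Index Type Traits block added in the hunk above maps a C++ index type to its vk::IndexType enumerator at compile time. A minimal usage sketch, assuming vulkan.hpp is included (bindIndices is a hypothetical helper, not part of the header):

    static_assert( vk::IndexTypeValue<uint16_t>::value == vk::IndexType::eUint16, "trait mismatch" );

    template <typename IndexT>
    void bindIndices( vk::CommandBuffer cmd, vk::Buffer indices )
    {
      // The index type is deduced from IndexT instead of being passed by hand.
      cmd.bindIndexBuffer( indices, 0, vk::IndexTypeValue<IndexT>::value );
    }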
VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; static VULKAN_HPP_CONST_OR_CONSTEXPR RenderingFlags allFlags = RenderingFlagBits::eContentsSecondaryCommandBuffers | RenderingFlagBits::eSuspending | - RenderingFlagBits::eResuming | RenderingFlagBits::eContentsInlineEXT | - RenderingFlagBits::eEnableLegacyDitheringEXT; + RenderingFlagBits::eResuming | RenderingFlagBits::eEnableLegacyDitheringEXT | + RenderingFlagBits::eContentsInlineKHR; }; enum class FormatFeatureFlagBits2 : VkFormatFeatureFlags2 @@ -6213,7 +6297,7 @@ namespace VULKAN_HPP_NAMESPACE enum class VideoEncodeCapabilityFlagBitsKHR : VkVideoEncodeCapabilityFlagsKHR { ePrecedingExternallyEncodedBytes = VK_VIDEO_ENCODE_CAPABILITY_PRECEDING_EXTERNALLY_ENCODED_BYTES_BIT_KHR, - eInsufficientstreamBufferRangeDetectionBit = VK_VIDEO_ENCODE_CAPABILITY_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_DETECTION_BIT_KHR + eInsufficientBitstreamBufferRangeDetection = VK_VIDEO_ENCODE_CAPABILITY_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_DETECTION_BIT_KHR }; using VideoEncodeCapabilityFlagsKHR = Flags; @@ -6223,14 +6307,14 @@ namespace VULKAN_HPP_NAMESPACE { static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; static VULKAN_HPP_CONST_OR_CONSTEXPR VideoEncodeCapabilityFlagsKHR allFlags = - VideoEncodeCapabilityFlagBitsKHR::ePrecedingExternallyEncodedBytes | VideoEncodeCapabilityFlagBitsKHR::eInsufficientstreamBufferRangeDetectionBit; + VideoEncodeCapabilityFlagBitsKHR::ePrecedingExternallyEncodedBytes | VideoEncodeCapabilityFlagBitsKHR::eInsufficientBitstreamBufferRangeDetection; }; enum class VideoEncodeFeedbackFlagBitsKHR : VkVideoEncodeFeedbackFlagsKHR { - estreamBufferOffsetBit = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BUFFER_OFFSET_BIT_KHR, - estreamBytesWrittenBit = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BYTES_WRITTEN_BIT_KHR, - estreamHasOverridesBit = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_HAS_OVERRIDES_BIT_KHR + eBitstreamBufferOffset = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BUFFER_OFFSET_BIT_KHR, + eBitstreamBytesWritten = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BYTES_WRITTEN_BIT_KHR, + eBitstreamHasOverrides = VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_HAS_OVERRIDES_BIT_KHR }; using VideoEncodeFeedbackFlagsKHR = Flags; @@ -6239,9 +6323,9 @@ namespace VULKAN_HPP_NAMESPACE struct FlagTraits { static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; - static VULKAN_HPP_CONST_OR_CONSTEXPR VideoEncodeFeedbackFlagsKHR allFlags = VideoEncodeFeedbackFlagBitsKHR::estreamBufferOffsetBit | - VideoEncodeFeedbackFlagBitsKHR::estreamBytesWrittenBit | - VideoEncodeFeedbackFlagBitsKHR::estreamHasOverridesBit; + static VULKAN_HPP_CONST_OR_CONSTEXPR VideoEncodeFeedbackFlagsKHR allFlags = VideoEncodeFeedbackFlagBitsKHR::eBitstreamBufferOffset | + VideoEncodeFeedbackFlagBitsKHR::eBitstreamBytesWritten | + VideoEncodeFeedbackFlagBitsKHR::eBitstreamHasOverrides; }; enum class VideoEncodeUsageFlagBitsKHR : VkVideoEncodeUsageFlagsKHR @@ -6917,6 +7001,7 @@ namespace VULKAN_HPP_NAMESPACE eDisableOptimization = VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR, eAllowDerivatives = VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR, eDerivative = VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR, + eEnableLegacyDitheringEXT = VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT, eViewIndexFromDeviceIndex = VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR, eDispatchBase = VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR, eDeferCompileNV = VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV, @@ -6944,7 +7029,8 @@ namespace VULKAN_HPP_NAMESPACE eNoProtectedAccessEXT = VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT, 
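The renames in these video-encode hunks repair the earlier mangled enumerators (the Bitstream prefix had been truncated to stream, and a redundant Bit suffix dropped); the underlying VK_VIDEO_ENCODE_* values are unchanged, so only the spelling at call sites moves. A minimal sketch, assuming a vulkan.hpp recent enough to carry the video-encode enums:

    vk::VideoEncodeFeedbackFlagsKHR wanted = vk::VideoEncodeFeedbackFlagBitsKHR::eBitstreamBufferOffset
                                           | vk::VideoEncodeFeedbackFlagBitsKHR::eBitstreamBytesWritten;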
eProtectedAccessOnlyEXT = VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT, eRayTracingDisplacementMicromapNV = VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV, - eDescriptorBufferEXT = VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT + eDescriptorBufferEXT = VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT, + eCaptureData = VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR }; using PipelineCreateFlags2KHR = Flags; @@ -6955,11 +7041,12 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; static VULKAN_HPP_CONST_OR_CONSTEXPR PipelineCreateFlags2KHR allFlags = PipelineCreateFlagBits2KHR::eDisableOptimization | PipelineCreateFlagBits2KHR::eAllowDerivatives | PipelineCreateFlagBits2KHR::eDerivative | - PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex | PipelineCreateFlagBits2KHR::eDispatchBase | PipelineCreateFlagBits2KHR::eDeferCompileNV | - PipelineCreateFlagBits2KHR::eCaptureStatistics | PipelineCreateFlagBits2KHR::eCaptureInternalRepresentations | - PipelineCreateFlagBits2KHR::eFailOnPipelineCompileRequired | PipelineCreateFlagBits2KHR::eEarlyReturnOnFailure | - PipelineCreateFlagBits2KHR::eLinkTimeOptimizationEXT | PipelineCreateFlagBits2KHR::eRetainLinkTimeOptimizationInfoEXT | - PipelineCreateFlagBits2KHR::eLibrary | PipelineCreateFlagBits2KHR::eRayTracingSkipTriangles | PipelineCreateFlagBits2KHR::eRayTracingSkipAabbs | + PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT | PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex | + PipelineCreateFlagBits2KHR::eDispatchBase | PipelineCreateFlagBits2KHR::eDeferCompileNV | PipelineCreateFlagBits2KHR::eCaptureStatistics | + PipelineCreateFlagBits2KHR::eCaptureInternalRepresentations | PipelineCreateFlagBits2KHR::eFailOnPipelineCompileRequired | + PipelineCreateFlagBits2KHR::eEarlyReturnOnFailure | PipelineCreateFlagBits2KHR::eLinkTimeOptimizationEXT | + PipelineCreateFlagBits2KHR::eRetainLinkTimeOptimizationInfoEXT | PipelineCreateFlagBits2KHR::eLibrary | + PipelineCreateFlagBits2KHR::eRayTracingSkipTriangles | PipelineCreateFlagBits2KHR::eRayTracingSkipAabbs | PipelineCreateFlagBits2KHR::eRayTracingNoNullAnyHitShaders | PipelineCreateFlagBits2KHR::eRayTracingNoNullClosestHitShaders | PipelineCreateFlagBits2KHR::eRayTracingNoNullMissShaders | PipelineCreateFlagBits2KHR::eRayTracingNoNullIntersectionShaders | PipelineCreateFlagBits2KHR::eRayTracingShaderGroupHandleCaptureReplay | PipelineCreateFlagBits2KHR::eIndirectBindableNV | @@ -6967,7 +7054,8 @@ namespace VULKAN_HPP_NAMESPACE PipelineCreateFlagBits2KHR::eRenderingFragmentDensityMapAttachmentEXT | PipelineCreateFlagBits2KHR::eRayTracingOpacityMicromapEXT | PipelineCreateFlagBits2KHR::eColorAttachmentFeedbackLoopEXT | PipelineCreateFlagBits2KHR::eDepthStencilAttachmentFeedbackLoopEXT | PipelineCreateFlagBits2KHR::eNoProtectedAccessEXT | PipelineCreateFlagBits2KHR::eProtectedAccessOnlyEXT | - PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV | PipelineCreateFlagBits2KHR::eDescriptorBufferEXT; + PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV | PipelineCreateFlagBits2KHR::eDescriptorBufferEXT | + PipelineCreateFlagBits2KHR::eCaptureData; }; enum class BufferUsageFlagBits2KHR : VkBufferUsageFlags2KHR @@ -7026,6 +7114,21 @@ namespace VULKAN_HPP_NAMESPACE BufferUsageFlagBits2KHR::eMicromapStorageEXT; }; + //=== VK_AMD_anti_lag === + + enum class AntiLagModeAMD + { + eDriverControl = VK_ANTI_LAG_MODE_DRIVER_CONTROL_AMD, + eOn = VK_ANTI_LAG_MODE_ON_AMD, + eOff = VK_ANTI_LAG_MODE_OFF_AMD + }; + + enum class 
AntiLagStageAMD + { + eInput = VK_ANTI_LAG_STAGE_INPUT_AMD, + ePresent = VK_ANTI_LAG_STAGE_PRESENT_AMD + }; + //=== VK_EXT_shader_object === enum class ShaderCreateFlagBitsEXT : VkShaderCreateFlagsEXT @@ -7079,6 +7182,75 @@ eString = VK_LAYER_SETTING_TYPE_STRING_EXT }; + //================================= + //=== Layer Setting Type Traits === + //================================= + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eBool32> + { + using Type = vk::Bool32; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eInt32> + { + using Type = int32_t; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eInt64> + { + using Type = int64_t; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eUint32> + { + using Type = uint32_t; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eUint64> + { + using Type = uint64_t; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eFloat32> + { + using Type = float; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eFloat64> + { + using Type = double; + }; + + template <> + struct CppType<LayerSettingTypeEXT, LayerSettingTypeEXT::eString> + { + using Type = char *; + }; + + template <typename T> + bool isSameType( LayerSettingTypeEXT layerSettingType ) + { + switch ( layerSettingType ) + { + case LayerSettingTypeEXT::eBool32: return std::is_same<T, vk::Bool32>::value; + case LayerSettingTypeEXT::eInt32: return std::is_same<T, int32_t>::value; + case LayerSettingTypeEXT::eInt64: return std::is_same<T, int64_t>::value; + case LayerSettingTypeEXT::eUint32: return std::is_same<T, uint32_t>::value; + case LayerSettingTypeEXT::eUint64: return std::is_same<T, uint64_t>::value; + case LayerSettingTypeEXT::eFloat32: return std::is_same<T, float>::value; + case LayerSettingTypeEXT::eFloat64: return std::is_same<T, double>::value; + case LayerSettingTypeEXT::eString: return std::is_same<T, char *>::value; + default: return false; + } + } + //=== VK_NV_low_latency2 === enum class LatencyMarkerNV @@ -7178,49 +7350,15 @@ }; using TimeDomainEXT = TimeDomainKHR; - //========================= - //=== Index Type Traits === + //=== VK_KHR_maintenance7 === - template <typename T> - struct IndexTypeValue + enum class PhysicalDeviceLayeredApiKHR { - }; - - template <> - struct IndexTypeValue<uint16_t> - { - static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint16; - }; - - template <> - struct CppType<IndexType, IndexType::eUint16> - { - using Type = uint16_t; - }; - - template <> - struct IndexTypeValue<uint32_t> - { - static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint32; - }; - - template <> - struct CppType<IndexType, IndexType::eUint32> - { - using Type = uint32_t; - }; - - template <> - struct IndexTypeValue<uint8_t> - { - static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint8KHR; - }; - - template <> - struct CppType<IndexType, IndexType::eUint8KHR> - { - using Type = uint8_t; + eVulkan = VK_PHYSICAL_DEVICE_LAYERED_API_VULKAN_KHR, + eD3D12 = VK_PHYSICAL_DEVICE_LAYERED_API_D3D12_KHR, + eMetal = VK_PHYSICAL_DEVICE_LAYERED_API_METAL_KHR, + eOpengl = VK_PHYSICAL_DEVICE_LAYERED_API_OPENGL_KHR, + eOpengles = VK_PHYSICAL_DEVICE_LAYERED_API_OPENGLES_KHR }; //=========================================================== @@ -7342,7 +7480,11 @@ namespace VULKAN_HPP_NAMESPACE return VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; //=== VK_EXT_shader_object === - case VULKAN_HPP_NAMESPACE::ObjectType::eShaderEXT: return VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + case VULKAN_HPP_NAMESPACE::ObjectType::eShaderEXT: + return VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + //=== VK_KHR_pipeline_binary === + case VULKAN_HPP_NAMESPACE::ObjectType::ePipelineBinaryKHR: return VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; default: VULKAN_HPP_ASSERT( false && "unknown ObjectType" ); return VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown;
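The Layer Setting Type Traits added above tie each LayerSettingTypeEXT enumerator to its C++ payload type, with isSameType<T> as the runtime counterpart; together they let code sanity-check the buffers it hands to a layer-settings structure. A minimal sketch, assuming vulkan.hpp and <type_traits> are included:

    static_assert( std::is_same<vk::CppType<vk::LayerSettingTypeEXT, vk::LayerSettingTypeEXT::eUint32>::Type, uint32_t>::value, "trait mismatch" );

    // True only when the template argument matches the enumerator's payload type.
    bool matches = vk::isSameType<uint32_t>( vk::LayerSettingTypeEXT::eUint32 );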
} diff --git a/third_party/vulkan/vulkan_extension_inspection.hpp b/third_party/vulkan/vulkan_extension_inspection.hpp index d9df2f6..da405d5 100644 --- a/third_party/vulkan/vulkan_extension_inspection.hpp +++ b/third_party/vulkan/vulkan_extension_inspection.hpp @@ -63,6 +63,7 @@ namespace VULKAN_HPP_NAMESPACE { "VK_MVK_macos_surface", "VK_EXT_metal_surface" }, #endif /*VK_USE_PLATFORM_MACOS_MVK*/ { "VK_AMD_gpu_shader_int16", "VK_KHR_shader_float16_int8" }, + { "VK_NV_ray_tracing", "VK_KHR_ray_tracing_pipeline" }, { "VK_EXT_buffer_device_address", "VK_KHR_buffer_device_address" }, { "VK_EXT_validation_features", "VK_EXT_layer_settings" } }; @@ -399,20 +400,24 @@ namespace VULKAN_HPP_NAMESPACE "VK_ANDROID_external_format_resolve", #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ "VK_KHR_maintenance5", + "VK_AMD_anti_lag", "VK_KHR_ray_tracing_position_fetch", "VK_EXT_shader_object", + "VK_KHR_pipeline_binary", "VK_QCOM_tile_properties", "VK_SEC_amigo_profiling", "VK_QCOM_multiview_per_view_viewports", "VK_NV_ray_tracing_invocation_reorder", "VK_NV_extended_sparse_address_space", "VK_EXT_mutable_descriptor_type", + "VK_EXT_legacy_vertex_attributes", "VK_ARM_shader_core_builtins", "VK_EXT_pipeline_library_group_handles", "VK_EXT_dynamic_rendering_unused_attachments", "VK_NV_low_latency2", "VK_KHR_cooperative_matrix", "VK_QCOM_multiview_per_view_render_areas", + "VK_KHR_compute_shader_derivatives", "VK_KHR_video_decode_av1", "VK_KHR_video_maintenance1", "VK_NV_per_stage_descriptor_set", @@ -435,8 +440,13 @@ namespace VULKAN_HPP_NAMESPACE "VK_KHR_maintenance6", "VK_NV_descriptor_pool_overallocation", "VK_NV_raw_access_chains", + "VK_KHR_shader_relaxed_extended_instruction", + "VK_NV_command_buffer_inheritance", + "VK_KHR_maintenance7", "VK_NV_shader_atomic_float16_vector", - "VK_NV_ray_tracing_validation" + "VK_EXT_shader_replicated_composites", + "VK_NV_ray_tracing_validation", + "VK_MESA_image_alignment_control" }; return deviceExtensions; } @@ -2145,6 +2155,11 @@ namespace VULKAN_HPP_NAMESPACE "VK_KHR_dynamic_rendering", } } }, { "VK_VERSION_1_3", { {} } } } }, + { "VK_KHR_pipeline_binary", + { { "VK_VERSION_1_0", + { { + "VK_KHR_maintenance5", + } } } } }, { "VK_QCOM_tile_properties", { { "VK_VERSION_1_0", { { @@ -2173,6 +2188,11 @@ namespace VULKAN_HPP_NAMESPACE { { "VK_KHR_maintenance3", } } } } }, + { "VK_EXT_legacy_vertex_attributes", + { { "VK_VERSION_1_0", + { { + "VK_EXT_vertex_input_dynamic_state", + } } } } }, { "VK_ARM_shader_core_builtins", { { "VK_VERSION_1_0", { { @@ -2207,6 +2227,11 @@ namespace VULKAN_HPP_NAMESPACE "VK_KHR_get_physical_device_properties2", } } }, { "VK_VERSION_1_1", { {} } } } }, + { "VK_KHR_compute_shader_derivatives", + { { "VK_VERSION_1_0", + { { + "VK_KHR_get_physical_device_properties2", + } } } } }, { "VK_KHR_video_decode_av1", { { "VK_VERSION_1_0", { { @@ -2306,7 +2331,14 @@ namespace VULKAN_HPP_NAMESPACE } } }, { "VK_VERSION_1_1", { {} } } } }, { "VK_KHR_maintenance6", { { "VK_VERSION_1_1", { {} } } } }, - { "VK_NV_descriptor_pool_overallocation", { { "VK_VERSION_1_1", { {} } } } } + { "VK_NV_descriptor_pool_overallocation", { { "VK_VERSION_1_1", { {} } } } }, + { "VK_KHR_maintenance7", { { "VK_VERSION_1_1", { {} } } } }, + { "VK_MESA_image_alignment_control", + { { "VK_VERSION_1_0", + { { + "VK_KHR_get_physical_device_properties2", + } } }, + { "VK_VERSION_1_1", { {} } } } } }; auto depIt = dependencies.find( extension ); return ( depIt != dependencies.end() ) ? 
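The tables extended above back the query helpers this header exposes. A minimal lookup sketch, assuming vulkan_extension_inspection.hpp is included (helper names follow the shipped header; getDeviceExtensions() returns a set of extension-name strings):

    bool known = vk::getDeviceExtensions().count( "VK_KHR_maintenance7" ) > 0;
    // The deprecation table above now maps VK_NV_ray_tracing to its KHR replacement.
    std::string replacement = vk::getExtensionDeprecatedBy( "VK_NV_ray_tracing" );  // "VK_KHR_ray_tracing_pipeline"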
depIt->second : noDependencies; @@ -2400,6 +2432,7 @@ namespace VULKAN_HPP_NAMESPACE { "VK_KHR_driver_properties", "VK_VERSION_1_2" }, { "VK_KHR_shader_float_controls", "VK_VERSION_1_2" }, { "VK_KHR_depth_stencil_resolve", "VK_VERSION_1_2" }, + { "VK_NV_compute_shader_derivatives", "VK_KHR_compute_shader_derivatives" }, { "VK_NV_fragment_shader_barycentric", "VK_KHR_fragment_shader_barycentric" }, { "VK_KHR_timeline_semaphore", "VK_VERSION_1_2" }, { "VK_KHR_vulkan_memory_model", "VK_VERSION_1_2" }, @@ -2503,6 +2536,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_KHR_shader_float16_int8"; } + if ( extension == "VK_NV_ray_tracing" ) + { + return "VK_KHR_ray_tracing_pipeline"; + } if ( extension == "VK_EXT_buffer_device_address" ) { return "VK_KHR_buffer_device_address"; @@ -2719,6 +2756,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_2"; } + if ( extension == "VK_NV_compute_shader_derivatives" ) + { + return "VK_KHR_compute_shader_derivatives"; + } if ( extension == "VK_NV_fragment_shader_barycentric" ) { return "VK_KHR_fragment_shader_barycentric"; @@ -2877,7 +2918,8 @@ namespace VULKAN_HPP_NAMESPACE #if defined( VK_USE_PLATFORM_MACOS_MVK ) ( extension == "VK_MVK_macos_surface" ) || #endif /*VK_USE_PLATFORM_MACOS_MVK*/ - ( extension == "VK_AMD_gpu_shader_int16" ) || ( extension == "VK_EXT_buffer_device_address" ) || ( extension == "VK_EXT_validation_features" ); + ( extension == "VK_AMD_gpu_shader_int16" ) || ( extension == "VK_NV_ray_tracing" ) || ( extension == "VK_EXT_buffer_device_address" ) || + ( extension == "VK_EXT_validation_features" ); } VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR_20 bool isDeviceExtension( std::string const & extension ) @@ -3052,14 +3094,16 @@ namespace VULKAN_HPP_NAMESPACE #if defined( VK_USE_PLATFORM_ANDROID_KHR ) || ( extension == "VK_ANDROID_external_format_resolve" ) #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - || ( extension == "VK_KHR_maintenance5" ) || ( extension == "VK_KHR_ray_tracing_position_fetch" ) || ( extension == "VK_EXT_shader_object" ) || - ( extension == "VK_QCOM_tile_properties" ) || ( extension == "VK_SEC_amigo_profiling" ) || ( extension == "VK_QCOM_multiview_per_view_viewports" ) || + || ( extension == "VK_KHR_maintenance5" ) || ( extension == "VK_AMD_anti_lag" ) || ( extension == "VK_KHR_ray_tracing_position_fetch" ) || + ( extension == "VK_EXT_shader_object" ) || ( extension == "VK_KHR_pipeline_binary" ) || ( extension == "VK_QCOM_tile_properties" ) || + ( extension == "VK_SEC_amigo_profiling" ) || ( extension == "VK_QCOM_multiview_per_view_viewports" ) || ( extension == "VK_NV_ray_tracing_invocation_reorder" ) || ( extension == "VK_NV_extended_sparse_address_space" ) || - ( extension == "VK_EXT_mutable_descriptor_type" ) || ( extension == "VK_ARM_shader_core_builtins" ) || - ( extension == "VK_EXT_pipeline_library_group_handles" ) || ( extension == "VK_EXT_dynamic_rendering_unused_attachments" ) || - ( extension == "VK_NV_low_latency2" ) || ( extension == "VK_KHR_cooperative_matrix" ) || - ( extension == "VK_QCOM_multiview_per_view_render_areas" ) || ( extension == "VK_KHR_video_decode_av1" ) || - ( extension == "VK_KHR_video_maintenance1" ) || ( extension == "VK_NV_per_stage_descriptor_set" ) || ( extension == "VK_QCOM_image_processing2" ) || + ( extension == "VK_EXT_mutable_descriptor_type" ) || ( extension == "VK_EXT_legacy_vertex_attributes" ) || + ( extension == "VK_ARM_shader_core_builtins" ) || ( extension == "VK_EXT_pipeline_library_group_handles" ) || + ( extension == "VK_EXT_dynamic_rendering_unused_attachments" ) || ( 
extension == "VK_NV_low_latency2" ) || + ( extension == "VK_KHR_cooperative_matrix" ) || ( extension == "VK_QCOM_multiview_per_view_render_areas" ) || + ( extension == "VK_KHR_compute_shader_derivatives" ) || ( extension == "VK_KHR_video_decode_av1" ) || ( extension == "VK_KHR_video_maintenance1" ) || + ( extension == "VK_NV_per_stage_descriptor_set" ) || ( extension == "VK_QCOM_image_processing2" ) || ( extension == "VK_QCOM_filter_cubic_weights" ) || ( extension == "VK_QCOM_ycbcr_degamma" ) || ( extension == "VK_QCOM_filter_cubic_clamp" ) || ( extension == "VK_EXT_attachment_feedback_loop_dynamic_state" ) || ( extension == "VK_KHR_vertex_attribute_divisor" ) || ( extension == "VK_KHR_load_store_op_none" ) || ( extension == "VK_KHR_shader_float_controls2" ) @@ -3069,7 +3113,10 @@ namespace VULKAN_HPP_NAMESPACE || ( extension == "VK_MSFT_layered_driver" ) || ( extension == "VK_KHR_index_type_uint8" ) || ( extension == "VK_KHR_line_rasterization" ) || ( extension == "VK_KHR_calibrated_timestamps" ) || ( extension == "VK_KHR_shader_expect_assume" ) || ( extension == "VK_KHR_maintenance6" ) || ( extension == "VK_NV_descriptor_pool_overallocation" ) || ( extension == "VK_NV_raw_access_chains" ) || - ( extension == "VK_NV_shader_atomic_float16_vector" ) || ( extension == "VK_NV_ray_tracing_validation" ); + ( extension == "VK_KHR_shader_relaxed_extended_instruction" ) || ( extension == "VK_NV_command_buffer_inheritance" ) || + ( extension == "VK_KHR_maintenance7" ) || ( extension == "VK_NV_shader_atomic_float16_vector" ) || + ( extension == "VK_EXT_shader_replicated_composites" ) || ( extension == "VK_NV_ray_tracing_validation" ) || + ( extension == "VK_MESA_image_alignment_control" ); } VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR_20 bool isInstanceExtension( std::string const & extension ) @@ -3162,15 +3209,16 @@ namespace VULKAN_HPP_NAMESPACE ( extension == "VK_KHR_shader_atomic_int64" ) || ( extension == "VK_EXT_calibrated_timestamps" ) || ( extension == "VK_EXT_vertex_attribute_divisor" ) || ( extension == "VK_EXT_pipeline_creation_feedback" ) || ( extension == "VK_KHR_driver_properties" ) || ( extension == "VK_KHR_shader_float_controls" ) || ( extension == "VK_KHR_depth_stencil_resolve" ) || - ( extension == "VK_NV_fragment_shader_barycentric" ) || ( extension == "VK_KHR_timeline_semaphore" ) || - ( extension == "VK_KHR_vulkan_memory_model" ) || ( extension == "VK_KHR_shader_terminate_invocation" ) || - ( extension == "VK_EXT_scalar_block_layout" ) || ( extension == "VK_EXT_subgroup_size_control" ) || ( extension == "VK_KHR_spirv_1_4" ) || - ( extension == "VK_KHR_separate_depth_stencil_layouts" ) || ( extension == "VK_EXT_tooling_info" ) || - ( extension == "VK_EXT_separate_stencil_usage" ) || ( extension == "VK_KHR_uniform_buffer_standard_layout" ) || - ( extension == "VK_KHR_buffer_device_address" ) || ( extension == "VK_EXT_line_rasterization" ) || ( extension == "VK_EXT_host_query_reset" ) || - ( extension == "VK_EXT_index_type_uint8" ) || ( extension == "VK_EXT_extended_dynamic_state" ) || - ( extension == "VK_EXT_shader_demote_to_helper_invocation" ) || ( extension == "VK_KHR_shader_integer_dot_product" ) || - ( extension == "VK_EXT_texel_buffer_alignment" ) || ( extension == "VK_KHR_shader_non_semantic_info" ) || ( extension == "VK_EXT_private_data" ) || + ( extension == "VK_NV_compute_shader_derivatives" ) || ( extension == "VK_NV_fragment_shader_barycentric" ) || + ( extension == "VK_KHR_timeline_semaphore" ) || ( extension == "VK_KHR_vulkan_memory_model" ) || + ( extension == 
"VK_KHR_shader_terminate_invocation" ) || ( extension == "VK_EXT_scalar_block_layout" ) || + ( extension == "VK_EXT_subgroup_size_control" ) || ( extension == "VK_KHR_spirv_1_4" ) || ( extension == "VK_KHR_separate_depth_stencil_layouts" ) || + ( extension == "VK_EXT_tooling_info" ) || ( extension == "VK_EXT_separate_stencil_usage" ) || + ( extension == "VK_KHR_uniform_buffer_standard_layout" ) || ( extension == "VK_KHR_buffer_device_address" ) || + ( extension == "VK_EXT_line_rasterization" ) || ( extension == "VK_EXT_host_query_reset" ) || ( extension == "VK_EXT_index_type_uint8" ) || + ( extension == "VK_EXT_extended_dynamic_state" ) || ( extension == "VK_EXT_shader_demote_to_helper_invocation" ) || + ( extension == "VK_KHR_shader_integer_dot_product" ) || ( extension == "VK_EXT_texel_buffer_alignment" ) || + ( extension == "VK_KHR_shader_non_semantic_info" ) || ( extension == "VK_EXT_private_data" ) || ( extension == "VK_EXT_pipeline_creation_cache_control" ) || ( extension == "VK_KHR_synchronization2" ) || ( extension == "VK_KHR_zero_initialize_workgroup_memory" ) || ( extension == "VK_EXT_ycbcr_2plane_444_formats" ) || ( extension == "VK_EXT_image_robustness" ) || ( extension == "VK_KHR_copy_commands2" ) || ( extension == "VK_EXT_4444_formats" ) || diff --git a/third_party/vulkan/vulkan_format_traits.hpp b/third_party/vulkan/vulkan_format_traits.hpp index 8f26981..25790fd 100644 --- a/third_party/vulkan/vulkan_format_traits.hpp +++ b/third_party/vulkan/vulkan_format_traits.hpp @@ -362,7 +362,7 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppSrgbBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 8; - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: return 4; + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 4; case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 2; case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; @@ -621,7 +621,7 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppSrgbBlockIMG: return "PVRTC1_4BPP"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return "PVRTC2_2BPP"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return "PVRTC2_4BPP"; - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: return "32-bit"; + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return "32-bit"; case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return "16-bit"; case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return "8-bit alpha"; @@ -2005,7 +2005,7 @@ namespace VULKAN_HPP_NAMESPACE case 3: return 4; default: VULKAN_HPP_ASSERT( false ); return 0; } - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: switch ( component ) { case 0: return 16; @@ -2283,7 +2283,7 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppSrgbBlockIMG: return 4; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 4; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 4; - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: return 2; + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 2; case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 4; case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; @@ -4299,7 +4299,7 @@ namespace VULKAN_HPP_NAMESPACE case 3: return "A"; default: VULKAN_HPP_ASSERT( false ); return ""; } - case 
VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: switch ( component ) { case 0: return "R"; @@ -6334,11 +6334,11 @@ namespace VULKAN_HPP_NAMESPACE case 3: return "SRGB"; default: VULKAN_HPP_ASSERT( false ); return ""; } - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: switch ( component ) { - case 0: return "SINT"; - case 1: return "SINT"; + case 0: return "SFIXED5"; + case 1: return "SFIXED5"; default: VULKAN_HPP_ASSERT( false ); return ""; } case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: @@ -7657,7 +7657,7 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppSrgbBlockIMG: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 1; - case VULKAN_HPP_NAMESPACE::Format::eR16G16S105NV: return 1; + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 1; case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 1; case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; diff --git a/third_party/vulkan/vulkan_funcs.hpp b/third_party/vulkan/vulkan_funcs.hpp index 86a4863..f91d6a4 100644 --- a/third_party/vulkan/vulkan_funcs.hpp +++ b/third_party/vulkan/vulkan_funcs.hpp @@ -8,6 +8,9 @@ #ifndef VULKAN_FUNCS_HPP #define VULKAN_FUNCS_HPP +// include-what-you-use: make sure, vulkan.hpp is used by code-completers +// IWYU pragma: private; include "vulkan.hpp" + namespace VULKAN_HPP_NAMESPACE { @@ -44,9 +47,9 @@ namespace VULKAN_HPP_NAMESPACE d.vkCreateInstance( reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &instance ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::createInstance" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::createInstance" ); - return createResultValueType( result, std::move( instance ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( instance ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -64,10 +67,10 @@ namespace VULKAN_HPP_NAMESPACE d.vkCreateInstance( reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &instance ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::createInstanceUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::createInstanceUnique" ); - return createResultValueType( result, - UniqueHandle( instance, ObjectDestroy( allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( instance, ObjectDestroy( allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -125,13 +128,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast( physicalDevices.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDevices" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDevices" ); VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); if ( physicalDeviceCount < physicalDevices.size() ) { physicalDevices.resize( physicalDeviceCount ); } - return createResultValueType( result, std::move( physicalDevices ) ); + return 
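For the vulkan_format_traits.hpp hunks above: eR16G16S105NV is renamed to eR16G16Sfixed5NV, and its per-component numeric format is now reported as SFIXED5 instead of SINT, matching the fixed-point interpretation; the query functions themselves are unchanged. A minimal sketch, assuming vulkan.hpp is included:

    uint8_t bytes = vk::blockSize( vk::Format::eR16G16Sfixed5NV );                         // 4
    char const * numeric = vk::componentNumericFormat( vk::Format::eR16G16Sfixed5NV, 0 );  // "SFIXED5"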
VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDevices ) ); } template ( physicalDevices.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDevices" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDevices" ); VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); if ( physicalDeviceCount < physicalDevices.size() ) { physicalDevices.resize( physicalDeviceCount ); } - return createResultValueType( result, std::move( physicalDevices ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDevices ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -261,9 +264,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( usage ), static_cast( flags ), reinterpret_cast( &imageFormatProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); - return createResultValueType( result, std::move( imageFormatProperties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( imageFormatProperties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -453,9 +456,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &device ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDevice" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDevice" ); - return createResultValueType( result, std::move( device ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( device ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -476,9 +479,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &device ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDeviceUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDeviceUnique" ); - return createResultValueType( result, UniqueHandle( device, ObjectDestroy( allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( device, ObjectDestroy( allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -539,13 +543,13 @@ namespace VULKAN_HPP_NAMESPACE layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -576,13 +580,13 @@ namespace VULKAN_HPP_NAMESPACE layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -621,13 +625,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -658,13 +662,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -700,13 +704,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template ( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - 
return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -775,13 +779,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template ( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -865,9 +869,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkQueueSubmit( m_queue, submits.size(), reinterpret_cast( submits.data() ), static_cast( fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -888,9 +892,9 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkQueueWaitIdle( m_queue ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -911,9 +915,9 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkDeviceWaitIdle( m_device ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -948,9 +952,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemory" ); - return 
createResultValueType( result, std::move( memory ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( memory ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -971,10 +975,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemoryUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemoryUnique" ); - return createResultValueType( result, - UniqueHandle( memory, ObjectFree( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( memory, ObjectFree( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1068,9 +1072,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( size ), static_cast( flags ), &pData ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory" ); - return createResultValueType( result, std::move( pData ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pData ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1103,9 +1107,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkFlushMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1132,9 +1136,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1187,9 +1191,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkBindBufferMemory( m_device, static_cast( buffer ), static_cast( memory ), static_cast( memoryOffset ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -1216,9 +1220,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkBindImageMemory( m_device, static_cast( image ), static_cast( memory ), static_cast( memoryOffset ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory" ); - return 
createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -1485,9 +1489,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkQueueBindSparse( m_queue, bindInfo.size(), reinterpret_cast( bindInfo.data() ), static_cast( fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1520,9 +1524,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFence" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFence" ); - return createResultValueType( result, std::move( fence ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fence ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -1541,10 +1545,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFenceUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFenceUnique" ); - return createResultValueType( result, - UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1622,9 +1626,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkResetFences( m_device, fences.size(), reinterpret_cast( fences.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1645,7 +1649,7 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetFenceStatus( m_device, static_cast( fence ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceStatus", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return static_cast( result ); @@ -1679,7 +1683,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkWaitForFences( m_device, fences.size(), reinterpret_cast( fences.data() ), static_cast( waitAll ), timeout ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitForFences", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); return static_cast( result ); @@ -1717,9 +1721,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); - resultCheck( result, 
VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphore" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphore" ); - return createResultValueType( result, std::move( semaphore ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( semaphore ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -1740,9 +1744,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphoreUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphoreUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( semaphore, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -1829,9 +1833,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createEvent" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createEvent" ); - return createResultValueType( result, std::move( event ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( event ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -1850,10 +1854,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createEventUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createEventUnique" ); - return createResultValueType( result, - UniqueHandle( event, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( event, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -1927,7 +1931,7 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetEventStatus( m_device, static_cast( event ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEventStatus", { VULKAN_HPP_NAMESPACE::Result::eEventSet, VULKAN_HPP_NAMESPACE::Result::eEventReset } ); return static_cast( result ); @@ -1952,9 +1956,9 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetEvent( m_device, static_cast( event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setEvent" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setEvent" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -1975,9 +1979,9 @@ namespace VULKAN_HPP_NAMESPACE # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkResetEvent( m_device, static_cast( event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetEvent" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetEvent" ); - return createResultValueType( result ); + return 
VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -2012,9 +2016,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPool" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPool" ); - return createResultValueType( result, std::move( queryPool ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( queryPool ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2035,9 +2039,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPoolUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPoolUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( queryPool, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -2142,9 +2146,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( data.data() ), static_cast( stride ), static_cast( flags ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return ResultValue>( result, std::move( data ) ); } @@ -2171,7 +2175,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &data ), static_cast( stride ), static_cast( flags ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResult", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return ResultValue( result, std::move( data ) ); @@ -2207,9 +2211,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBuffer" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBuffer" ); - return createResultValueType( result, std::move( buffer ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( buffer ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2228,10 +2232,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferUnique" ); - return createResultValueType( result, - UniqueHandle( buffer, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( buffer, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -2319,9 +2323,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), 
reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferView" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferView" ); - return createResultValueType( result, std::move( view ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( view ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2342,10 +2346,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferViewUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferViewUnique" ); - return createResultValueType( result, - UniqueHandle( view, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( view, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -2431,9 +2435,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImage" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImage" ); - return createResultValueType( result, std::move( image ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( image ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2452,10 +2456,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageUnique" ); - return createResultValueType( result, - UniqueHandle( image, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( image, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -2576,9 +2580,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageView" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageView" ); - return createResultValueType( result, std::move( view ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( view ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2599,10 +2603,10 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageViewUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageViewUnique" ); - return createResultValueType( result, - UniqueHandle( view, ObjectDestroy( *this, allocator, d ) ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( + result, UniqueHandle( view, ObjectDestroy( *this, allocator, d ) ) ); 
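The vulkan_funcs.hpp churn in these hunks is mechanical and continues in the same pattern through the rest of the file: resultCheck and createResultValueType moved into the VULKAN_HPP_NAMESPACE::detail namespace, and every call site gained the qualification. The public enhanced-mode API is untouched, so user code such as this sketch compiles as before (assuming a valid vk::Device and a filled vk::ImageViewCreateInfo):

    // With exceptions enabled this still throws a vk::SystemError on failure,
    // now routed through detail::resultCheck.
    vk::UniqueImageView view = device.createImageViewUnique( createInfo );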
} # endif /* VULKAN_HPP_NO_SMART_HANDLE */ #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -2690,9 +2694,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModule" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModule" ); - return createResultValueType( result, std::move( shaderModule ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( shaderModule ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2713,9 +2717,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModuleUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModuleUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( shaderModule, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -2804,9 +2808,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCache" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCache" ); - return createResultValueType( result, std::move( pipelineCache ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pipelineCache ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -2827,9 +2831,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCacheUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCacheUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( pipelineCache, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -2921,13 +2925,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineCacheData" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineCacheData" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize < data.size() ) { data.resize( dataSize ); } - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template ( pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineCacheData" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineCacheData" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize 
< data.size() ) { data.resize( dataSize ); } - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -2990,9 +2994,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkMergePipelineCaches( m_device, static_cast( dstCache ), srcCaches.size(), reinterpret_cast( srcCaches.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergePipelineCaches" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergePipelineCaches" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -3034,9 +3038,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -3064,9 +3068,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -3091,9 +3095,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipeline", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipeline", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue( result, std::move( pipeline ) ); } @@ -3119,9 +3123,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines; uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, 
d ); @@ -3156,9 +3160,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -3189,9 +3193,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelineUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelineUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, UniqueHandle( pipeline, ObjectDestroy( *this, allocator, d ) ) ); @@ -3237,9 +3241,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -3267,9 +3271,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -3294,9 +3298,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipeline", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipeline", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue( result, std::move( pipeline ) ); } @@ -3322,9 +3326,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( 
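Unlike the single-object creators above, the pipeline-creation wrappers in these hunks list ePipelineCompileRequiredEXT as an accepted result, so they return a ResultValue that the caller is expected to inspect rather than throwing. A minimal usage sketch, assuming a valid `device`, `pipelineCache`, and `createInfo` are in scope (placeholder names, not part of this patch):

    // Sketch only: creation can "succeed" with ePipelineCompileRequiredEXT,
    // so the wrapper hands back the Result alongside the handle.
    vk::ResultValue<vk::Pipeline> rv = device.createGraphicsPipeline( pipelineCache, createInfo );
    if ( rv.result == vk::Result::ePipelineCompileRequiredEXT )
    {
      // placeholder: fall back to a full (non-fail-fast) compile, log, etc.
    }
    vk::Pipeline pipeline = rv.value;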
@@ -3359,9 +3363,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -3392,9 +3396,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelineUnique",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelineUnique",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -3485,9 +3489,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayout" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayout" );
-    return createResultValueType( result, std::move( pipelineLayout ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pipelineLayout ) );
@@ -3508,9 +3512,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayoutUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayoutUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::PipelineLayout, Dispatch>( pipelineLayout, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -3597,9 +3601,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSampler" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSampler" );
-    return createResultValueType( result, std::move( sampler ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( sampler ) );
@@ -3618,10 +3622,10 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerUnique" );
-      return createResultValueType( result,
-                                    UniqueHandle<VULKAN_HPP_NAMESPACE::Sampler, Dispatch>( sampler, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+        result, UniqueHandle<VULKAN_HPP_NAMESPACE::Sampler, Dispatch>( sampler, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -3709,9 +3713,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayout" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayout" );
-    return createResultValueType( result, std::move( setLayout ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( setLayout ) );
@@ -3732,9 +3736,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayoutUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayoutUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorSetLayout, Dispatch>( setLayout, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -3827,9 +3831,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPool" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPool" );
-    return createResultValueType( result, std::move( descriptorPool ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( descriptorPool ) );
@@ -3850,9 +3854,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPoolUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPoolUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorPool, Dispatch>( descriptorPool, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -3958,9 +3962,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" );
-    return createResultValueType( result, std::move( descriptorSets ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( descriptorSets ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" );
-    return createResultValueType( result, std::move( descriptorSets ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( descriptorSets ) );
@@ -3998,7 +4002,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" );
@@ -4006,7 +4010,7 @@ namespace VULKAN_HPP_NAMESPACE
-    return createResultValueType( result, std::move( uniqueDescriptorSets ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueDescriptorSets ) );
@@ -4028,7 +4032,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" );
@@ -4036,7 +4040,7 @@ namespace VULKAN_HPP_NAMESPACE
-    return createResultValueType( result, std::move( uniqueDescriptorSets ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueDescriptorSets ) );
@@ -4161,9 +4165,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebuffer" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebuffer" );
-    return createResultValueType( result, std::move( framebuffer ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( framebuffer ) );
@@ -4184,9 +4188,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebufferUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebufferUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::Framebuffer, Dispatch>( framebuffer, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -4275,9 +4279,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass" );
-    return createResultValueType( result, std::move( renderPass ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( renderPass ) );
@@ -4298,9 +4302,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPassUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPassUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>( renderPass, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -4415,9 +4419,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPool" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPool" );
-    return createResultValueType( result, std::move( commandPool ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( commandPool ) );
@@ -4438,9 +4442,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPoolUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPoolUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::CommandPool, Dispatch>( commandPool, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -4519,9 +4523,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetCommandPool" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetCommandPool" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -4548,9 +4552,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" );
-    return createResultValueType( result, std::move( commandBuffers ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( commandBuffers ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" );
-    return createResultValueType( result, std::move( commandBuffers ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( commandBuffers ) );
@@ -4588,7 +4592,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" );
@@ -4596,7 +4600,7 @@ namespace VULKAN_HPP_NAMESPACE
-    return createResultValueType( result, std::move( uniqueCommandBuffers ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueCommandBuffers ) );
@@ -4618,7 +4622,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" );
@@ -4626,7 +4630,7 @@ namespace VULKAN_HPP_NAMESPACE
-    return createResultValueType( result, std::move( uniqueCommandBuffers ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueCommandBuffers ) );
@@ -4705,9 +4709,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -4728,9 +4732,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -4753,9 +4757,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -5730,9 +5734,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceVersion" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceVersion" );
-    return createResultValueType( result, std::move( apiVersion ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( apiVersion ) );
@@ -5757,9 +5761,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -5784,9 +5788,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
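These renames track upstream Vulkan-Hpp, which moved its result helpers into a `detail` namespace; the behaviour of the checks themselves is unchanged. As a loose sketch of what the now-qualified helpers do under the default exceptions-enabled configuration (simplified and abridged, not the verbatim vendored source):

    namespace VULKAN_HPP_NAMESPACE
    {
      namespace detail
      {
        // Throws the vk::SystemError subclass matching `result` unless the
        // result is in the accepted set (reduced to eSuccess in this sketch).
        inline void resultCheck( Result result, char const * message )
        {
          if ( result != Result::eSuccess )
            throwResultException( result, message );  // throw helper, name per upstream
        }

        // With exceptions enabled, ResultValueType<T>::type is plain T, so this
        // simply forwards the payload once resultCheck has passed.
        template <typename T>
        inline typename ResultValueType<T>::type createResultValueType( Result /*result*/, T && data )
        {
          return std::forward<T>( data );
        }
      }
    }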
@@ -5877,13 +5881,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
     VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
     if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
     {
       physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
     }
-    return createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
     VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
     if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
     {
       physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
     }
-    return createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
@@ -6254,9 +6258,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
-    return createResultValueType( result, std::move( imageFormatProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( imageFormatProperties ) );
@@ -6275,9 +6279,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
-    return createResultValueType( result, std::move( structureChain ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) );
@@ -6608,9 +6612,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversion" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversion" );
-    return createResultValueType( result, std::move( ycbcrConversion ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( ycbcrConversion ) );
@@ -6632,9 +6636,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>( ycbcrConversion, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -6731,9 +6735,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplate" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplate" );
-    return createResultValueType( result, std::move( descriptorUpdateTemplate ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( descriptorUpdateTemplate ) );
@@ -6755,11 +6759,11 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateUnique" );
-      return createResultValueType( result,
-                                    UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>(
-                                      descriptorUpdateTemplate, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result,
+                                                                  UniqueHandle<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate, Dispatch>(
+                                                                    descriptorUpdateTemplate, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -7068,9 +7072,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2" );
-    return createResultValueType( result, std::move( renderPass ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( renderPass ) );
@@ -7091,9 +7095,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2Unique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2Unique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::RenderPass, Dispatch>( renderPass, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -7203,9 +7207,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValue" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValue" );
-    return createResultValueType( result, std::move( value ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( value ) );
@@ -7230,7 +7234,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphores", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } );
@@ -7257,9 +7261,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -7375,13 +7379,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
       toolProperties.resize( toolCount );
     }
-    return createResultValueType( result, std::move( toolProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( toolProperties ) );
@@ -7413,13 +7417,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
       toolProperties.resize( toolCount );
     }
-    return createResultValueType( result, std::move( toolProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( toolProperties ) );
@@ -7454,9 +7458,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlot" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlot" );
-    return createResultValueType( result, std::move( privateDataSlot ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( privateDataSlot ) );
@@ -7477,9 +7481,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::PrivateDataSlot, Dispatch>( privateDataSlot, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -7566,9 +7570,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateData" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateData" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -7726,9 +7730,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -8346,9 +8350,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" );
-    return createResultValueType( result, std::move( supported ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( supported ) );
@@ -8375,9 +8379,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" );
-    return createResultValueType( result, std::move( surfaceCapabilities ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceCapabilities ) );
@@ -8416,13 +8420,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
     VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
     if ( surfaceFormatCount < surfaceFormats.size() )
     {
       surfaceFormats.resize( surfaceFormatCount );
     }
-    return createResultValueType( result, std::move( surfaceFormats ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceFormats ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
     VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
     if ( surfaceFormatCount < surfaceFormats.size() )
     {
       surfaceFormats.resize( surfaceFormatCount );
     }
-    return createResultValueType( result, std::move( surfaceFormats ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceFormats ) );
@@ -8497,13 +8501,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
    {
       presentModes.resize( presentModeCount );
     }
-    return createResultValueType( result, std::move( presentModes ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentModes ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
     {
       presentModes.resize( presentModeCount );
     }
-    return createResultValueType( result, std::move( presentModes ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentModes ) );
@@ -8576,9 +8580,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHR" );
-    return createResultValueType( result, std::move( swapchain ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchain ) );
@@ -8599,9 +8603,9 @@ namespace VULKAN_HPP_NAMESPACE
-      resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHRUnique" );
+      VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHRUnique" );
-      return createResultValueType(
+      return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
         result, UniqueHandle<VULKAN_HPP_NAMESPACE::SwapchainKHR, Dispatch>( swapchain, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -8694,13 +8698,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainImagesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainImagesKHR" );
     VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() );
     if ( swapchainImageCount < swapchainImages.size() )
     {
       swapchainImages.resize( swapchainImageCount );
     }
-    return createResultValueType( result, std::move( swapchainImages ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchainImages ) );
 template <...>
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainImagesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainImagesKHR" );
     VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() );
     if ( swapchainImageCount < swapchainImages.size() )
     {
       swapchainImages.resize( swapchainImageCount );
     }
-    return createResultValueType( result, std::move( swapchainImages ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchainImages ) );
@@ -8767,12 +8771,12 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImageKHR",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-                   VULKAN_HPP_NAMESPACE::Result::eTimeout,
-                   VULKAN_HPP_NAMESPACE::Result::eNotReady,
-                   VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImageKHR",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+                                                 VULKAN_HPP_NAMESPACE::Result::eTimeout,
+                                                 VULKAN_HPP_NAMESPACE::Result::eNotReady,
+                                                 VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
@@ -8798,7 +8802,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result, VULKAN_HPP_NAMESPACE_STRING "::Queue::presentKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
@@ -8828,9 +8832,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" );
-    return createResultValueType( result, std::move( deviceGroupPresentCapabilities ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( deviceGroupPresentCapabilities ) );
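acquireNextImageKHR and acquireNextImage2KHR likewise keep eSuboptimalKHR, eTimeout, and eNotReady out of the throwing path and surface them through the returned ResultValue. A usage sketch, assuming a valid `device`, `swapchain`, and `imageAvailable` semaphore are in scope (placeholder names; UINT64_MAX comes from <cstdint>):

    // eSuboptimalKHR still delivers a usable image; treat it as a hint to
    // recreate the swapchain on a later frame rather than as an error.
    vk::ResultValue<uint32_t> acquire = device.acquireNextImageKHR( swapchain, UINT64_MAX, imageAvailable );
    bool swapchainNeedsRecreation = ( acquire.result == vk::Result::eSuboptimalKHR );
    uint32_t imageIndex = acquire.value;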
createResultValueType( result, std::move( deviceGroupPresentCapabilities ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( deviceGroupPresentCapabilities ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -8858,9 +8862,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast( surface ), reinterpret_cast( &modes ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" ); - return createResultValueType( result, std::move( modes ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( modes ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -8900,13 +8904,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, static_cast( surface ), &rectCount, reinterpret_cast( rects.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" ); VULKAN_HPP_ASSERT( rectCount <= rects.size() ); if ( rectCount < rects.size() ) { rects.resize( rectCount ); } - return createResultValueType( result, std::move( rects ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( rects ) ); } template ( surface ), &rectCount, reinterpret_cast( rects.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" ); VULKAN_HPP_ASSERT( rectCount <= rects.size() ); if ( rectCount < rects.size() ) { rects.resize( rectCount ); } - return createResultValueType( result, std::move( rects ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( rects ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -8967,12 +8971,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t imageIndex; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast( &acquireInfo ), &imageIndex ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eTimeout, - VULKAN_HPP_NAMESPACE::Result::eNotReady, - VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eTimeout, + VULKAN_HPP_NAMESPACE::Result::eNotReady, + VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); return ResultValue( result, std::move( imageIndex ) ); } @@ -9013,13 +9017,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( 
result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -9047,13 +9051,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -9091,13 +9095,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -9127,13 +9131,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -9171,13 +9175,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast( displays.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); VULKAN_HPP_ASSERT( displayCount <= displays.size() ); if ( displayCount < displays.size() ) { displays.resize( displayCount ); } - return createResultValueType( result, std::move( displays ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( displays ) ); } template ( 
displays.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" );
VULKAN_HPP_ASSERT( displayCount <= displays.size() );
if ( displayCount < displays.size() )
{
displays.resize( displayCount );
}
- return createResultValueType( result, std::move( displays ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( displays ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -9250,13 +9254,13 @@ namespace VULKAN_HPP_NAMESPACE
m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModePropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModePropertiesKHR" );
VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
if ( propertyCount < properties.size() )
{
properties.resize( propertyCount );
}
- return createResultValueType( result, std::move( properties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
}
template ( display ), &propertyCount, reinterpret_cast( properties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModePropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModePropertiesKHR" );
VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
if ( propertyCount < properties.size() )
{
properties.resize( propertyCount );
}
- return createResultValueType( result, std::move( properties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -9333,9 +9337,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &mode ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHR" );
- return createResultValueType( result, std::move( mode ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( mode ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9358,9 +9362,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &mode ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( mode, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9391,9 +9395,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetDisplayPlaneCapabilitiesKHR(
m_physicalDevice, static_cast( mode ), planeIndex, reinterpret_cast( &capabilities ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" );
- return createResultValueType( result, std::move( capabilities ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( capabilities ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -9428,9 +9432,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9451,9 +9455,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9495,9 +9499,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( createInfos.data() ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( swapchains.data() ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" );
- return createResultValueType( result, std::move( swapchains ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchains ) );
}
template ( createInfos.data() ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( swapchains.data() ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" );
- return createResultValueType( result, std::move( swapchains ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchains ) );
}
template
@@ -9544,9 +9548,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &swapchain ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHR" );
- return createResultValueType( result, std::move( swapchain ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( swapchain ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9569,7 +9573,7 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( createInfos.data() ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast(
swapchains.data() ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" );
std::vector, SwapchainKHRAllocator> uniqueSwapchains;
uniqueSwapchains.reserve( createInfos.size() );
ObjectDestroy deleter( *this, allocator, d );
@@ -9577,7 +9581,7 @@ namespace VULKAN_HPP_NAMESPACE
{
uniqueSwapchains.push_back( UniqueHandle( swapchain, deleter ) );
}
- return createResultValueType( result, std::move( uniqueSwapchains ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueSwapchains ) );
}
template ( createInfos.data() ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( swapchains.data() ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" );
std::vector, SwapchainKHRAllocator> uniqueSwapchains( swapchainKHRAllocator );
uniqueSwapchains.reserve( createInfos.size() );
ObjectDestroy deleter( *this, allocator, d );
@@ -9611,7 +9615,7 @@ namespace VULKAN_HPP_NAMESPACE
{
uniqueSwapchains.push_back( UniqueHandle( swapchain, deleter ) );
}
- return createResultValueType( result, std::move( uniqueSwapchains ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( uniqueSwapchains ) );
}
template
@@ -9632,9 +9636,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &swapchain ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( swapchain, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9674,9 +9678,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9697,9 +9701,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9765,9 +9769,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9788,9 +9792,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9860,9 +9864,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9883,9 +9887,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -9952,9 +9956,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -9975,9 +9979,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -10018,9 +10022,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHR" );
+
VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHR" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -10041,9 +10045,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -10091,9 +10095,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &callback ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXT" );
- return createResultValueType( result, std::move( callback ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( callback ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -10114,9 +10118,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &callback ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXTUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXTUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( callback, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -10248,9 +10252,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast( &tagInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10274,9 +10278,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast( &nameInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10356,9 +10360,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::VideoCapabilitiesKHR capabilities;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetPhysicalDeviceVideoCapabilitiesKHR( m_physicalDevice, reinterpret_cast( &videoProfile ), reinterpret_cast( &capabilities ) ) );
-
resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
- return createResultValueType( result, std::move( capabilities ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( capabilities ) );
}
template
@@ -10374,9 +10378,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::VideoCapabilitiesKHR & capabilities = structureChain.template get();
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetPhysicalDeviceVideoCapabilitiesKHR( m_physicalDevice, reinterpret_cast( &videoProfile ), reinterpret_cast( &capabilities ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
- return createResultValueType( result, std::move( structureChain ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10423,13 +10427,13 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( videoFormatProperties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
VULKAN_HPP_ASSERT( videoFormatPropertyCount <= videoFormatProperties.size() );
if ( videoFormatPropertyCount < videoFormatProperties.size() )
{
videoFormatProperties.resize( videoFormatPropertyCount );
}
- return createResultValueType( result, std::move( videoFormatProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( videoFormatProperties ) );
}
template ( videoFormatProperties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
VULKAN_HPP_ASSERT( videoFormatPropertyCount <= videoFormatProperties.size() );
if ( videoFormatPropertyCount < videoFormatProperties.size() )
{
videoFormatProperties.resize( videoFormatPropertyCount );
}
- return createResultValueType( result, std::move( videoFormatProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( videoFormatProperties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10506,9 +10510,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &videoSession ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionKHR" );
- return createResultValueType( result, std::move( videoSession ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( videoSession ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -10529,9 +10533,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &videoSession ) ) );
- resultCheck( result,
VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionKHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( videoSession, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -10717,9 +10721,9 @@ namespace VULKAN_HPP_NAMESPACE
static_cast( videoSession ),
bindSessionMemoryInfos.size(),
reinterpret_cast( bindSessionMemoryInfos.data() ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindVideoSessionMemoryKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindVideoSessionMemoryKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10755,9 +10759,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &videoSessionParameters ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionParametersKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionParametersKHR" );
- return createResultValueType( result, std::move( videoSessionParameters ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( videoSessionParameters ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -10778,11 +10782,11 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &videoSessionParameters ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionParametersKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createVideoSessionParametersKHRUnique" );
- return createResultValueType( result,
- UniqueHandle(
- videoSessionParameters, ObjectDestroy( *this, allocator, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result,
+ UniqueHandle(
+ videoSessionParameters, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -10815,9 +10819,9 @@ namespace VULKAN_HPP_NAMESPACE
d.vkUpdateVideoSessionParametersKHR( m_device,
static_cast( videoSessionParameters ),
reinterpret_cast( &updateInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::updateVideoSessionParametersKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::updateVideoSessionParametersKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -11181,9 +11185,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &module ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuModuleNVX" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuModuleNVX" );
- return createResultValueType( result, std::move( module ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( module ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -11204,10 +11208,10 @@ namespace
VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &module ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuModuleNVXUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuModuleNVXUnique" );
- return createResultValueType( result,
- UniqueHandle( module, ObjectDestroy( *this, allocator, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+ result, UniqueHandle( module, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -11243,9 +11247,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &function ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuFunctionNVX" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuFunctionNVX" );
- return createResultValueType( result, std::move( function ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( function ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -11266,9 +11270,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &function ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuFunctionNVXUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCuFunctionNVXUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( function, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -11449,9 +11453,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX properties;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetImageViewAddressNVX( m_device, static_cast( imageView ), reinterpret_cast( &properties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewAddressNVX" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewAddressNVX" );
- return createResultValueType( result, std::move( properties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -11549,13 +11553,13 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( info.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderInfoAMD" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderInfoAMD" );
VULKAN_HPP_ASSERT( infoSize <= info.size() );
if ( infoSize < info.size() )
{
info.resize( infoSize );
}
- return createResultValueType( result, std::move( info ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( info ) );
}
template ( info.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderInfoAMD" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderInfoAMD" );
VULKAN_HPP_ASSERT( infoSize <= info.size() );
if ( infoSize < info.size() )
{
info.resize( infoSize );
}
- return
createResultValueType( result, std::move( info ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( info ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -11671,9 +11675,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGP" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGP" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -11694,9 +11698,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGPUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGPUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -11755,9 +11759,9 @@ namespace VULKAN_HPP_NAMESPACE
static_cast( flags ),
static_cast( externalHandleType ),
reinterpret_cast( &externalImageFormatProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" );
- return createResultValueType( result, std::move( externalImageFormatProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( externalImageFormatProperties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -11788,9 +11792,9 @@ namespace VULKAN_HPP_NAMESPACE
HANDLE handle;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryWin32HandleNV( m_device, static_cast( memory ), static_cast( handleType ), &handle ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleNV" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleNV" );
- return createResultValueType( result, std::move( handle ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( handle ) );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
#endif /*VK_USE_PLATFORM_WIN32_KHR*/
@@ -11958,9 +11962,9 @@ namespace VULKAN_HPP_NAMESPACE
d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
- return createResultValueType( result, std::move( imageFormatProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( imageFormatProperties ) );
}
template
@@ -11979,9 +11983,9 @@ namespace VULKAN_HPP_NAMESPACE
d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ),
reinterpret_cast( &imageFormatProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
- return createResultValueType( result, std::move( structureChain ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12332,9 +12336,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNN" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNN" );
- return createResultValueType( result, std::move( surface ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -12355,9 +12359,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &surface ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNNUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNNUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -12413,13 +12417,13 @@ namespace VULKAN_HPP_NAMESPACE
m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
{
physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
}
- return createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
}
template ( physicalDeviceGroupProperties.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
{
physicalDeviceGroupProperties.resize( physicalDeviceGroupCount );
}
- return createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( physicalDeviceGroupProperties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12521,9 +12525,9 @@ namespace VULKAN_HPP_NAMESPACE
HANDLE handle;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryWin32HandleKHR( m_device,
reinterpret_cast( &getWin32HandleInfo ), &handle ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" );
- return createResultValueType( result, std::move( handle ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( handle ) );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12557,9 +12561,9 @@ namespace VULKAN_HPP_NAMESPACE
static_cast( handleType ),
handle,
reinterpret_cast( &memoryWin32HandleProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" );
- return createResultValueType( result, std::move( memoryWin32HandleProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( memoryWin32HandleProperties ) );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
#endif /*VK_USE_PLATFORM_WIN32_KHR*/
@@ -12588,9 +12592,9 @@ namespace VULKAN_HPP_NAMESPACE
int fd;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" );
- return createResultValueType( result, std::move( fd ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fd ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12618,9 +12622,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR memoryFdProperties;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast( handleType ), fd, reinterpret_cast( &memoryFdProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" );
- return createResultValueType( result, std::move( memoryFdProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( memoryFdProperties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12684,9 +12688,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &importSemaphoreWin32HandleInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12712,9 +12716,9 @@ namespace VULKAN_HPP_NAMESPACE
HANDLE handle;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" );
- return createResultValueType( result, std::move( handle ) );
+ return
VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( handle ) );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
#endif /*VK_USE_PLATFORM_WIN32_KHR*/
@@ -12741,9 +12745,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast( &importSemaphoreFdInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12769,9 +12773,9 @@ namespace VULKAN_HPP_NAMESPACE
int fd;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" );
- return createResultValueType( result, std::move( fd ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fd ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -12917,9 +12921,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &descriptorUpdateTemplate ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHR" );
- return createResultValueType( result, std::move( descriptorUpdateTemplate ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( descriptorUpdateTemplate ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -12941,11 +12945,11 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &descriptorUpdateTemplate ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHRUnique" );
- return createResultValueType( result,
- UniqueHandle(
- descriptorUpdateTemplate, ObjectDestroy( *this, allocator, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result,
+ UniqueHandle(
+ descriptorUpdateTemplate, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13085,9 +13089,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkAcquireXlibDisplayEXT( m_physicalDevice, &dpy, static_cast( display ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13114,9 +13118,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::DisplayKHR display;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast(
&display ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXT" );
- return createResultValueType( result, std::move( display ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( display ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -13132,10 +13136,10 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::DisplayKHR display;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast( &display ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXTUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXTUnique" );
- return createResultValueType( result,
- UniqueHandle( display, ObjectRelease( *this, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+ result, UniqueHandle( display, ObjectRelease( *this, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13168,9 +13172,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT surfaceCapabilities;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" );
- return createResultValueType( result, std::move( surfaceCapabilities ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceCapabilities ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13199,9 +13203,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkDisplayPowerControlEXT( m_device, static_cast( display ), reinterpret_cast( &displayPowerInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13236,9 +13240,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &deviceEventInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &fence ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXT" );
- return createResultValueType( result, std::move( fence ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fence ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -13259,10 +13263,10 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &deviceEventInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &fence ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXTUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXTUnique" );
- return
createResultValueType( result,
- UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+ result, UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13302,9 +13306,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &displayEventInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &fence ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXT" );
- return createResultValueType( result, std::move( fence ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fence ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -13327,10 +13331,10 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &displayEventInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &fence ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXTUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXTUnique" );
- return createResultValueType( result,
- UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+ result, UniqueHandle( fence, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13359,9 +13363,9 @@ namespace VULKAN_HPP_NAMESPACE
uint64_t counterValue;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSwapchainCounterEXT( m_device, static_cast( swapchain ), static_cast( counter ), &counterValue ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainCounterEXT" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainCounterEXT" );
- return createResultValueType( result, std::move( counterValue ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( counterValue ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13391,9 +13395,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE displayTimingProperties;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast( swapchain ), reinterpret_cast( &displayTimingProperties ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRefreshCycleDurationGOOGLE" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRefreshCycleDurationGOOGLE" );
- return createResultValueType( result, std::move( displayTimingProperties ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( displayTimingProperties ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13439,13 +13443,13 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( presentationTimings.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPastPresentationTimingGOOGLE" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPastPresentationTimingGOOGLE" );
VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() );
if ( presentationTimingCount <
presentationTimings.size() )
{
presentationTimings.resize( presentationTimingCount );
}
- return createResultValueType( result, std::move( presentationTimings ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentationTimings ) );
}
template <
@@ -13482,13 +13486,13 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( presentationTimings.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPastPresentationTimingGOOGLE" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPastPresentationTimingGOOGLE" );
VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() );
if ( presentationTimingCount < presentationTimings.size() )
{
presentationTimings.resize( presentationTimingCount );
}
- return createResultValueType( result, std::move( presentationTimings ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentationTimings ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13608,9 +13612,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &renderPass ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHR" );
- return createResultValueType( result, std::move( renderPass ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( renderPass ) );
}
# ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -13631,9 +13635,9 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( &createInfo ),
reinterpret_cast( static_cast( allocator ) ),
reinterpret_cast( &renderPass ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHRUnique" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHRUnique" );
- return createResultValueType(
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
result, UniqueHandle( renderPass, ObjectDestroy( *this, allocator, d ) ) );
}
# endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -13735,9 +13739,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSwapchainStatusKHR( m_device, static_cast( swapchain ) ) );
- resultCheck( result,
- VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainStatusKHR",
- { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+ VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainStatusKHR",
+ { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
return static_cast( result );
}
@@ -13801,9 +13805,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast( &importFenceWin32HandleInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13830,9 +13834,9 @@ namespace VULKAN_HPP_NAMESPACE
HANDLE handle;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" );
- return createResultValueType( result, std::move( handle ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( handle ) );
}
# endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
#endif /*VK_USE_PLATFORM_WIN32_KHR*/
@@ -13859,9 +13863,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkImportFenceFdKHR( m_device, reinterpret_cast( &importFenceFdInfo ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13887,9 +13891,9 @@ namespace VULKAN_HPP_NAMESPACE
int fd;
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetFenceFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" );
- return createResultValueType( result, std::move( fd ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fd ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -13948,14 +13952,14 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( counterDescriptions.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
VULKAN_HPP_ASSERT( counterCount <= counters.size() );
if ( counterCount < counters.size() )
{
counters.resize( counterCount );
counterDescriptions.resize( counterCount );
}
- return createResultValueType( result, std::move( data_ ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
}
template ( counterDescriptions.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
VULKAN_HPP_ASSERT( counterCount <= counters.size() );
if ( counterCount < counters.size() )
{
counters.resize( counterCount );
counterDescriptions.resize( counterCount );
}
- return createResultValueType( result, std::move( data_ ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -14064,9 +14068,9 @@ namespace VULKAN_HPP_NAMESPACE
VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast( &info ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck(
result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" );
- return createResultValueType( result );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -14107,9 +14111,9 @@ namespace VULKAN_HPP_NAMESPACE
d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
- return createResultValueType( result, std::move( surfaceCapabilities ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceCapabilities ) );
}
template
@@ -14128,9 +14132,9 @@ namespace VULKAN_HPP_NAMESPACE
d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
- return createResultValueType( result, std::move( structureChain ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) );
}
#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -14175,13 +14179,13 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( surfaceFormats.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
if ( surfaceFormatCount < surfaceFormats.size() )
{
surfaceFormats.resize( surfaceFormatCount );
}
- return createResultValueType( result, std::move( surfaceFormats ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceFormats ) );
}
template ( surfaceFormats.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
if ( surfaceFormatCount < surfaceFormats.size() )
{
surfaceFormats.resize( surfaceFormatCount );
}
- return createResultValueType( result, std::move( surfaceFormats ) );
+ return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surfaceFormats ) );
}
template
@@ -14257,7 +14261,7 @@ namespace VULKAN_HPP_NAMESPACE
reinterpret_cast( surfaceFormats.data() ) ) );
}
} while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
- resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
+ VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
if ( surfaceFormatCount < surfaceFormats.size() )
{
@@ -14267,7 +14271,7 @@ namespace VULKAN_HPP_NAMESPACE
{
structureChains[i].template get() = surfaceFormats[i];
}
- return
createResultValueType( result, std::move( structureChains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChains ) ); } template ( surfaceFormats.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" ); VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); if ( surfaceFormatCount < surfaceFormats.size() ) { @@ -14318,7 +14322,7 @@ namespace VULKAN_HPP_NAMESPACE { structureChains[i].template get() = surfaceFormats[i]; } - return createResultValueType( result, std::move( structureChains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChains ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14359,13 +14363,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -14395,13 +14399,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14440,13 +14444,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -14477,13 +14481,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, 
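// Every hunk in this range applies the same mechanical change: resultCheck()
// and createResultValueType() now live in VULKAN_HPP_NAMESPACE::detail
// instead of at namespace scope, so the generated wrappers qualify the calls.
// A minimal sketch of what the two helpers amount to when exceptions are
// enabled -- illustrative only; the real definitions in the vendored
// vulkan.hpp also cover VULKAN_HPP_NO_EXCEPTIONS and multi-result overloads:
namespace VULKAN_HPP_NAMESPACE { namespace detail {
  inline void resultCheck( Result result, char const * message )
  {
    if ( result != Result::eSuccess )
      throwResultException( result, message ); // vulkan.hpp's result-to-exception mapper
  }
  template <typename T>
  inline T createResultValueType( Result /*result*/, T && data )
  {
    return std::forward<T>( data ); // with exceptions on, the value passes straight through
  }
} } // namespace VULKAN_HPP_NAMESPACE::detail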
VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14523,13 +14527,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModeProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModeProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template < @@ -14562,13 +14566,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModeProperties2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayModeProperties2KHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14599,9 +14603,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast( &displayPlaneInfo ), reinterpret_cast( &capabilities ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); - return createResultValueType( result, std::move( capabilities ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( capabilities ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14639,9 +14643,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVK" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVK" ); - return createResultValueType( result, std::move( surface ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -14662,9 +14666,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); - 
resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVKUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVKUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -14705,9 +14709,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVK" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVK" ); - return createResultValueType( result, std::move( surface ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -14728,9 +14732,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVKUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVKUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -14759,9 +14763,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast( &nameInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14785,9 +14789,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast( &tagInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -14925,9 +14929,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXT" ); - return createResultValueType( result, std::move( messenger ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( messenger ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -14948,9 +14952,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING 
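// The *Unique overloads patched above change nothing about ownership: the raw
// handle is still wrapped in a UniqueHandle whose ObjectDestroy deleter
// captures the owner, the allocator and the dispatcher. Usage sketch,
// assuming a valid vk::Instance `instance`, a filled
// vk::DebugUtilsMessengerCreateInfoEXT `ci`, and a dispatcher with the
// VK_EXT_debug_utils entry points loaded:
vk::UniqueDebugUtilsMessengerEXT messenger = instance.createDebugUtilsMessengerEXTUnique( ci );
// vkDestroyDebugUtilsMessengerEXT runs automatically when `messenger` goes out of scope.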
"::Instance::createDebugUtilsMessengerEXTUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXTUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( messenger, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -15072,9 +15076,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID properties; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template @@ -15092,9 +15096,9 @@ namespace VULKAN_HPP_NAMESPACE structureChain.template get(); VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); - return createResultValueType( result, std::move( structureChain ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -15123,9 +15127,9 @@ namespace VULKAN_HPP_NAMESPACE struct AHardwareBuffer * buffer; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast( &info ), &buffer ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" ); - return createResultValueType( result, std::move( buffer ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( buffer ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ @@ -15172,9 +15176,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDX", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDX", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -15202,9 +15206,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDX", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, 
VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDX", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -15229,9 +15233,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelineAMDX", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelineAMDX", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue( result, std::move( pipeline ) ); } @@ -15258,9 +15262,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDXUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDXUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines; uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -15296,9 +15300,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDXUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelinesAMDXUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -15329,9 +15333,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelineAMDXUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createExecutionGraphPipelineAMDXUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, UniqueHandle( pipeline, ObjectDestroy( *this, allocator, d ) ) ); @@ -15364,9 +15368,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExecutionGraphPipelineScratchSizeAMDX sizeInfo; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetExecutionGraphPipelineScratchSizeAMDX( 
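// Because ePipelineCompileRequiredEXT is listed as an allowed result, the
// pipeline-creation wrappers above return a vk::ResultValue instead of the
// bare handles, and detail::resultCheck does not throw for it. Caller-side
// sketch of what that implies (illustrative; `rv` would come from one of the
// create*Pipelines* wrappers above):
void inspect( vk::ResultValue<std::vector<vk::Pipeline>> & rv )
{
  if ( rv.result == vk::Result::ePipelineCompileRequiredEXT )
  {
    // rv.value still holds the created handles; a caller could log this and
    // schedule a shader recompile instead of treating it as a failure.
  }
}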
@@ -15395,9 +15399,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getExecutionGraphPipelineNodeIndexAMDX" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getExecutionGraphPipelineNodeIndexAMDX" );
-    return createResultValueType( result, std::move( nodeIndex ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( nodeIndex ) );
@@ -15730,9 +15734,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHR" );
-    return createResultValueType( result, std::move( accelerationStructure ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( accelerationStructure ) );
@@ -15753,9 +15757,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHRUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHRUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR, Dispatch>( accelerationStructure, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -15962,7 +15966,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result,
       VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR",
       { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
@@ -15995,7 +15999,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result,
       VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR",
       { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
@@ -16030,7 +16034,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
      result,
      VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR",
      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
@@ -16065,7 +16069,7 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
      result,
      VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR",
      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
@@ -16120,9 +16124,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
@@ -16147,9 +16151,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
@@ -16464,12 +16468,12 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-                   VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-                   VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR,
-                   VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+                                                 VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+                                                 VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR,
+                                                 VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -16499,12 +16503,12 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-                   VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-                   VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR,
-                   VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+                                                 VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+                                                 VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR,
+                                                 VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
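// The acceleration-structure hunks add eOperationDeferredKHR and
// eOperationNotDeferredKHR to the allowed results, so host builds and copies
// driven through a deferred operation do not throw. Sketch, assuming a valid
// vk::Device `device`, a vk::DeferredOperationKHR `op`, and filled
// `buildInfos` / `rangeInfos` arrays matching the enhanced-mode signature:
vk::Result r = device.buildAccelerationStructuresKHR( op, buildInfos, rangeInfos );
if ( r == vk::Result::eOperationDeferredKHR )
{
  // the work completes asynchronously; the caller joins/polls `op` later
}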
"::Device::createRayTracingPipelinesKHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, std::move( pipelines ) ); } @@ -16531,12 +16535,12 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHR", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue( result, std::move( pipeline ) ); } @@ -16565,12 +16569,12 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines; uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -16608,12 +16612,12 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); uniquePipelines.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -16646,12 +16650,12 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHRUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - 
VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHRUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); return ResultValue>( result, UniqueHandle( pipeline, ObjectDestroy( *this, allocator, d ) ) ); @@ -16687,9 +16691,9 @@ namespace VULKAN_HPP_NAMESPACE std::vector data( dataSize / sizeof( DataType ) ); VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( DataType ), reinterpret_cast( data.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesKHR" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template @@ -16705,9 +16709,9 @@ namespace VULKAN_HPP_NAMESPACE DataType data; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, sizeof( DataType ), reinterpret_cast( &data ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleKHR" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -16740,9 +16744,9 @@ namespace VULKAN_HPP_NAMESPACE std::vector data( dataSize / sizeof( DataType ) ); VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( DataType ), reinterpret_cast( data.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template @@ -16758,9 +16762,9 @@ namespace VULKAN_HPP_NAMESPACE DataType data; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, sizeof( DataType ), reinterpret_cast( &data ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandleKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandleKHR" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ 
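// The templated handle getters size their output as dataSize / sizeof( DataType ),
// which is why the hunks above move a std::vector<DataType>. Typical
// byte-wise use, assuming `device`, a ray-tracing vk::Pipeline `pipeline`,
// a `groupCount`, and `handleSize` taken from the physical device's
// ray-tracing pipeline properties (shaderGroupHandleSize):
std::vector<uint8_t> handles =
  device.getRayTracingShaderGroupHandlesKHR<uint8_t>( pipeline, 0, groupCount, groupCount * handleSize );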
@@ -16857,9 +16861,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHR" );
-    return createResultValueType( result, std::move( ycbcrConversion ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( ycbcrConversion ) );
@@ -16881,9 +16885,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHRUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHRUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion, Dispatch>( ycbcrConversion, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -16942,9 +16946,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -16969,9 +16973,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -17000,9 +17004,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageDrmFormatModifierPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageDrmFormatModifierPropertiesEXT" );
-    return createResultValueType( result, std::move( properties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
@@ -17039,9 +17043,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXT" );
-    return createResultValueType( result, std::move( validationCache ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( validationCache ) );
@@ -17062,9 +17066,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXTUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXTUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::ValidationCacheEXT, Dispatch>( validationCache, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
@@ -17151,9 +17155,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergeValidationCachesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergeValidationCachesEXT" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -17191,13 +17195,13 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getValidationCacheDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getValidationCacheDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getValidationCacheDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getValidationCacheDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
@@ -17340,9 +17344,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNV" );
-    return createResultValueType( result, std::move( accelerationStructure ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( accelerationStructure ) );
@@ -17363,9 +17367,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNVUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNVUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::AccelerationStructureNV, Dispatch>( accelerationStructure, ObjectDestroy<Device, Dispatch>( *this, allocator, d ) ) );
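// getValidationCacheDataEXT above uses the generator's query-then-fetch loop:
// ask for dataSize, resize, fetch, and repeat while the driver reports
// eIncomplete; only the final result reaches detail::resultCheck. From the
// caller's side that is a single call, assuming `device` and a
// vk::ValidationCacheEXT `cache` created via the functions above:
std::vector<uint8_t> blob = device.getValidationCacheDataEXT( cache );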
@@ -17502,9 +17506,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -17646,9 +17650,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17676,9 +17680,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17703,9 +17707,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNV",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNV",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17731,9 +17735,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17768,9 +17772,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17801,9 +17805,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNVUnique",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+                                               VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNVUnique",
+                                               { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } );
@@ -17839,9 +17843,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesNV" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
@@ -17857,9 +17861,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleNV" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
@@ -17888,9 +17892,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template @@ -17905,9 +17909,9 @@ namespace VULKAN_HPP_NAMESPACE DataType data; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetAccelerationStructureHandleNV( m_device, static_cast( accelerationStructure ), sizeof( DataType ), reinterpret_cast( &data ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -17973,9 +17977,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCompileDeferredNV( m_device, static_cast( pipeline ), shader ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::compileDeferredNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::compileDeferredNV" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -18104,9 +18108,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( handleType ), pHostPointer, reinterpret_cast( &memoryHostPointerProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" ); - return createResultValueType( result, std::move( memoryHostPointerProperties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( memoryHostPointerProperties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -18163,13 +18167,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); if ( timeDomainCount < timeDomains.size() ) { timeDomains.resize( timeDomainCount ); } - return createResultValueType( result, std::move( timeDomains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( timeDomains ) ); } template ( timeDomains.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); if ( timeDomainCount < timeDomains.size() ) { timeDomains.resize( timeDomainCount ); } - return createResultValueType( result, 
std::move( timeDomains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( timeDomains ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -18237,9 +18241,9 @@ namespace VULKAN_HPP_NAMESPACE uint64_t & maxDeviation = data_.second; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } template ( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } template @@ -18282,9 +18286,9 @@ namespace VULKAN_HPP_NAMESPACE uint64_t & maxDeviation = data_.second; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetCalibratedTimestampsEXT( m_device, 1, reinterpret_cast( ×tampInfo ), ×tamp, &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampEXT" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -18488,9 +18492,9 @@ namespace VULKAN_HPP_NAMESPACE uint64_t value; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSemaphoreCounterValueKHR( m_device, static_cast( semaphore ), &value ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValueKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValueKHR" ); - return createResultValueType( result, std::move( value ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( value ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -18515,7 +18519,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast( &waitInfo ), timeout ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphoresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); return static_cast( result ); @@ -18542,9 +18546,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast( &signalInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" ); - return createResultValueType( result ); + return 
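// getCalibratedTimestampsEXT returns the timestamps together with the maximum
// deviation; `data_.second` in the hunks above is that deviation. Usage
// sketch, assuming `device` and a filled
// std::vector<vk::CalibratedTimestampInfoEXT> `infos`:
auto [timestamps, maxDeviation] = device.getCalibratedTimestampsEXT( infos );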
@@ -18571,9 +18575,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18604,9 +18608,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18631,9 +18635,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18658,9 +18662,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18691,9 +18695,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTEL" );
-    return createResultValueType( result, std::move( configuration ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( configuration ) );
@@ -18712,9 +18716,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTELUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTELUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL, Dispatch>( configuration, ObjectRelease<Device, Dispatch>( *this, d ) ) );
@@ -18740,9 +18744,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releasePerformanceConfigurationINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releasePerformanceConfigurationINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18766,9 +18770,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::release" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::release" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18793,9 +18797,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
@@ -18822,9 +18826,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" );
-    return createResultValueType( result, std::move( value ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( value ) );
@@ -18874,9 +18878,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIA" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIA" );
-    return createResultValueType( result, std::move( surface ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
@@ -18897,9 +18901,9 @@ namespace VULKAN_HPP_NAMESPACE
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIAUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIAUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle<VULKAN_HPP_NAMESPACE::SurfaceKHR, Dispatch>( surface, ObjectDestroy<Instance, Dispatch>( *this, allocator, d ) ) );
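// PerformanceConfigurationINTEL is released (vkReleasePerformanceConfigurationINTEL)
// rather than destroyed with allocation callbacks, so its Unique overload
// above binds an ObjectRelease<Device, Dispatch> deleter instead of
// ObjectDestroy. Sketch, assuming `device` and a filled
// vk::PerformanceConfigurationAcquireInfoINTEL `acquireInfo`:
auto config = device.acquirePerformanceConfigurationINTELUnique( acquireInfo );
// the configuration is released automatically when `config` goes out of scope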
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -18940,9 +18944,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXT" );
-    return createResultValueType( result, std::move( surface ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -18963,9 +18967,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXTUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXTUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -19011,13 +19015,13 @@ namespace VULKAN_HPP_NAMESPACE
           m_physicalDevice, &fragmentShadingRateCount, reinterpret_cast( fragmentShadingRates.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
     VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() );
     if ( fragmentShadingRateCount < fragmentShadingRates.size() )
     {
       fragmentShadingRates.resize( fragmentShadingRateCount );
     }
-    return createResultValueType( result, std::move( fragmentShadingRates ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fragmentShadingRates ) );
   }
   template (
           fragmentShadingRates.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
     VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() );
     if ( fragmentShadingRateCount < fragmentShadingRates.size() )
     {
       fragmentShadingRates.resize( fragmentShadingRateCount );
     }
-    return createResultValueType( result, std::move( fragmentShadingRates ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( fragmentShadingRates ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19113,17 +19117,20 @@ namespace VULKAN_HPP_NAMESPACE
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
   template
-  VULKAN_HPP_INLINE void CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pLocationInfo,
-                                                                               Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  VULKAN_HPP_INLINE void
+    CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo,
+                                                          Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
   {
     VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION );
-    d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer, reinterpret_cast( pLocationInfo ) );
+    d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer,
+                                                  reinterpret_cast( pInputAttachmentIndexInfo ) );
   }
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
   template
-  VULKAN_HPP_INLINE void CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & locationInfo,
-                                                                               Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  VULKAN_HPP_INLINE void
+    CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo,
+                                                          Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
   {
     VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION );
 # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 )
@@ -19131,7 +19138,8 @@ namespace VULKAN_HPP_NAMESPACE
                        "Function requires " );
 # endif
-    d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer, reinterpret_cast( &locationInfo ) );
+    d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer,
+                                                  reinterpret_cast( &inputAttachmentIndexInfo ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19199,13 +19207,13 @@ namespace VULKAN_HPP_NAMESPACE
           d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast( toolProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
       toolProperties.resize( toolCount );
     }
-    return createResultValueType( result, std::move( toolProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( toolProperties ) );
   }
   template <
@@ -19237,13 +19245,13 @@ namespace VULKAN_HPP_NAMESPACE
           d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast( toolProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
       toolProperties.resize( toolCount );
     }
-    return createResultValueType( result, std::move( toolProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( toolProperties ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19271,9 +19279,10 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkWaitForPresentKHR( m_device, static_cast( swapchain ), presentId, timeout ) );
-    resultCheck( result,
-                 VULKAN_HPP_NAMESPACE_STRING "::Device::waitForPresentKHR",
-                 { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
+      result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::waitForPresentKHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return static_cast( result );
   }
@@ -19315,13 +19324,13 @@ namespace VULKAN_HPP_NAMESPACE
           m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
       properties.resize( propertyCount );
     }
-    return createResultValueType( result, std::move( properties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
   }
   template (
           properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
       properties.resize( propertyCount );
     }
-    return createResultValueType( result, std::move( properties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19401,13 +19410,13 @@ namespace VULKAN_HPP_NAMESPACE
           m_physicalDevice, &combinationCount, reinterpret_cast( combinations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
     VULKAN_HPP_ASSERT( combinationCount <= combinations.size() );
     if ( combinationCount < combinations.size() )
     {
       combinations.resize( combinationCount );
     }
-    return createResultValueType( result, std::move( combinations ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( combinations ) );
   }
   template (
           combinations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
     VULKAN_HPP_ASSERT( combinationCount <= combinations.size() );
     if ( combinationCount < combinations.size() )
     {
       combinations.resize( combinationCount );
     }
-    return createResultValueType( result, std::move( combinations ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( combinations ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19496,13 +19505,13 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( presentModes.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
     {
       presentModes.resize( presentModeCount );
     }
-    return createResultValueType( result, std::move( presentModes ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentModes ) );
   }
   template (
           presentModes.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
     {
       presentModes.resize( presentModeCount );
     }
-    return createResultValueType( result, std::move( presentModes ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( presentModes ) );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -19566,9 +19575,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkAcquireFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireFullScreenExclusiveModeEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireFullScreenExclusiveModeEXT" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -19592,9 +19601,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkReleaseFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseFullScreenExclusiveModeEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseFullScreenExclusiveModeEXT" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -19623,9 +19632,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &modes ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" );
-    return createResultValueType( result, std::move( modes ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( modes ) );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
 #endif /*VK_USE_PLATFORM_WIN32_KHR*/
@@ -19663,9 +19672,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXT" );
-    return createResultValueType( result, std::move( surface ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -19686,9 +19695,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXTUnique" );
VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXTUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXTUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -20006,9 +20015,9 @@ namespace VULKAN_HPP_NAMESPACE m_device, reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &deferredOperation ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHR" ); - return createResultValueType( result, std::move( deferredOperation ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( deferredOperation ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -20026,9 +20035,9 @@ namespace VULKAN_HPP_NAMESPACE m_device, reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &deferredOperation ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHRUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHRUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( deferredOperation, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -20143,9 +20152,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkDeferredOperationJoinKHR( m_device, static_cast( operation ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::deferredOperationJoinKHR", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( + result, + VULKAN_HPP_NAMESPACE_STRING "::Device::deferredOperationJoinKHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } ); return static_cast( result ); } @@ -20195,13 +20205,13 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" ); VULKAN_HPP_ASSERT( executableCount <= properties.size() ); if ( executableCount < properties.size() ) { properties.resize( executableCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template ( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" ); VULKAN_HPP_ASSERT( executableCount <= properties.size() ); if ( executableCount < properties.size() ) { properties.resize( executableCount ); } - return createResultValueType( result, std::move( 
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -20292,13 +20302,13 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( statistics.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
     VULKAN_HPP_ASSERT( statisticCount <= statistics.size() );
     if ( statisticCount < statistics.size() )
     {
       statistics.resize( statisticCount );
     }
-    return createResultValueType( result, std::move( statistics ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( statistics ) );
   }
   template (
           statistics.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
     VULKAN_HPP_ASSERT( statisticCount <= statistics.size() );
     if ( statisticCount < statistics.size() )
     {
       statistics.resize( statisticCount );
     }
-    return createResultValueType( result, std::move( statistics ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( statistics ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -20391,13 +20401,13 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( internalRepresentations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
     VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() );
     if ( internalRepresentationCount < internalRepresentations.size() )
     {
       internalRepresentations.resize( internalRepresentationCount );
     }
-    return createResultValueType( result, std::move( internalRepresentations ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( internalRepresentations ) );
   }
   template (
           internalRepresentations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
     VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() );
     if ( internalRepresentationCount < internalRepresentations.size() )
     {
       internalRepresentations.resize( internalRepresentationCount );
     }
-    return createResultValueType( result, std::move( internalRepresentations ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( internalRepresentations ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -20468,9 +20478,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkCopyMemoryToImageEXT( m_device, reinterpret_cast( &copyMemoryToImageInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" );
result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20494,9 +20504,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCopyImageToMemoryEXT( m_device, reinterpret_cast( ©ImageToMemoryInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20520,9 +20530,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCopyImageToImageEXT( m_device, reinterpret_cast( ©ImageToImageInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20549,9 +20559,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkTransitionImageLayoutEXT( m_device, transitions.size(), reinterpret_cast( transitions.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20635,9 +20645,9 @@ namespace VULKAN_HPP_NAMESPACE void * pData; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkMapMemory2KHR( m_device, reinterpret_cast( &memoryMapInfo ), &pData ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" ); - return createResultValueType( result, std::move( pData ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pData ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20661,9 +20671,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkUnmapMemory2KHR( m_device, reinterpret_cast( &memoryUnmapInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -20689,9 +20699,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkReleaseSwapchainImagesEXT( m_device, reinterpret_cast( &releaseInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseSwapchainImagesEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseSwapchainImagesEXT" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); 
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -20839,9 +20849,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &indirectCommandsLayout ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNV" );
-    return createResultValueType( result, std::move( indirectCommandsLayout ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( indirectCommandsLayout ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -20862,11 +20872,11 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &indirectCommandsLayout ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNVUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNVUnique" );
-    return createResultValueType( result,
-                                  UniqueHandle(
-                                    indirectCommandsLayout, ObjectDestroy( *this, allocator, d ) ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result,
+                                                                UniqueHandle(
+                                                                  indirectCommandsLayout, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -20974,9 +20984,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkAcquireDrmDisplayEXT( m_physicalDevice, drmFd, static_cast( display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireDrmDisplayEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireDrmDisplayEXT" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -21003,9 +21013,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DisplayKHR display;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetDrmDisplayEXT( m_physicalDevice, drmFd, connectorId, reinterpret_cast( &display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDrmDisplayEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDrmDisplayEXT" );
-    return createResultValueType( result, std::move( display ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( display ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -21021,10 +21031,10 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DisplayKHR display;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetDrmDisplayEXT( m_physicalDevice, drmFd, connectorId, reinterpret_cast( &display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDrmDisplayEXTUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDrmDisplayEXTUnique" );
-    return createResultValueType( result,
-                                  UniqueHandle( display, ObjectRelease( *this, d ) ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+      result, UniqueHandle( display, ObjectRelease( *this, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -21062,9 +21072,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &privateDataSlot ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXT" );
-    return createResultValueType( result, std::move( privateDataSlot ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( privateDataSlot ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -21085,9 +21095,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &privateDataSlot ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXTUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXTUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( privateDataSlot, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -21147,9 +21157,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkSetPrivateDataEXT( m_device, static_cast( objectType_ ), objectHandle, static_cast( privateDataSlot ), data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -21215,9 +21225,9 @@ namespace VULKAN_HPP_NAMESPACE
       d.vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR( m_physicalDevice,
                                                                  reinterpret_cast( &qualityLevelInfo ),
                                                                  reinterpret_cast( &qualityLevelProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
-    return createResultValueType( result, std::move( qualityLevelProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( qualityLevelProperties ) );
   }
   template
@@ -21238,9 +21248,9 @@ namespace VULKAN_HPP_NAMESPACE
       d.vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR( m_physicalDevice,
                                                                  reinterpret_cast( &qualityLevelInfo ),
                                                                  reinterpret_cast( &qualityLevelProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
-    return createResultValueType( result, std::move( structureChain ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -21297,9 +21307,9 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
-    return createResultValueType( result, std::move( data_ ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
   }
   template (
           data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
-    return createResultValueType( result, std::move( data_ ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
   }
   template
@@ -21382,9 +21392,9 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
-    return createResultValueType( result, std::move( data_ ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
   }
   template (
           data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
-    return createResultValueType( result, std::move( data_ ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -21492,9 +21502,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &module ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaModuleNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaModuleNV" );
-    return createResultValueType( result, std::move( module ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( module ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -21515,9 +21525,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &module ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaModuleNVUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaModuleNVUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( module, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -21556,13 +21566,13 @@ namespace VULKAN_HPP_NAMESPACE
           d.vkGetCudaModuleCacheNV( m_device, static_cast( module ), &cacheSize, reinterpret_cast( cacheData.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCudaModuleCacheNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCudaModuleCacheNV" );
     VULKAN_HPP_ASSERT( cacheSize <= cacheData.size() );
     if ( cacheSize < cacheData.size() )
     {
       cacheData.resize( cacheSize );
     }
-    return createResultValueType( result, std::move( cacheData ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( cacheData ) );
   }
   template (
           module ), &cacheSize, reinterpret_cast( cacheData.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCudaModuleCacheNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCudaModuleCacheNV" );
     VULKAN_HPP_ASSERT( cacheSize <= cacheData.size() );
     if ( cacheSize < cacheData.size() )
     {
       cacheData.resize( cacheSize );
     }
-    return createResultValueType( result, std::move( cacheData ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( cacheData ) );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -21630,9 +21640,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &function ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaFunctionNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaFunctionNV" );
-    return createResultValueType( result, std::move( function ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( function ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -21653,9 +21663,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &function ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaFunctionNVUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createCudaFunctionNVUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( function, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -21960,9 +21970,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
      d.vkQueueSubmit2KHR( m_queue, submits.size(), reinterpret_cast( submits.data() ), static_cast( fence ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2KHR" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22244,9 +22254,9 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetBufferOpaqueCaptureDescriptorDataEXT( m_device, reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferOpaqueCaptureDescriptorDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
  }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22273,9 +22283,9 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetImageOpaqueCaptureDescriptorDataEXT( m_device, reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageOpaqueCaptureDescriptorDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22302,9 +22312,9 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetImageViewOpaqueCaptureDescriptorDataEXT( m_device, reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewOpaqueCaptureDescriptorDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22331,9 +22341,9 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetSamplerOpaqueCaptureDescriptorDataEXT( m_device, reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSamplerOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSamplerOpaqueCaptureDescriptorDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22361,9 +22371,9 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT( m_device, reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureOpaqueCaptureDescriptorDataEXT" );
-    return createResultValueType( result, std::move( data ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22564,56 +22574,6 @@ namespace VULKAN_HPP_NAMESPACE
     return static_cast(
       d.vkGetDeviceFaultInfoEXT( m_device, reinterpret_cast( pFaultCounts ), reinterpret_cast( pFaultInfo ) ) );
   }
-
-#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
-  template
-  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE
-    typename ResultValueType>::type
-    Device::getFaultInfoEXT( Dispatch const & d ) const
-  {
-    VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION );
-# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 )
-    VULKAN_HPP_ASSERT( d.vkGetDeviceFaultInfoEXT && "Function requires " );
-# endif
-
-    std::pair data_;
-    VULKAN_HPP_NAMESPACE::DeviceFaultCountsEXT & faultCounts = data_.first;
-    VULKAN_HPP_NAMESPACE::DeviceFaultInfoEXT & faultInfo = data_.second;
-    VULKAN_HPP_NAMESPACE::Result result;
-    do
-    {
-      result =
-        static_cast( d.vkGetDeviceFaultInfoEXT( m_device, reinterpret_cast( &faultCounts ), nullptr ) );
-      if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )
-      {
-        std::free( faultInfo.pAddressInfos );
-        if ( faultCounts.addressInfoCount )
-        {
-          faultInfo.pAddressInfos = reinterpret_cast(
-            std::malloc( faultCounts.addressInfoCount * sizeof( VULKAN_HPP_NAMESPACE::DeviceFaultAddressInfoEXT ) ) );
-        }
-        std::free( faultInfo.pVendorInfos );
-        if ( faultCounts.vendorInfoCount )
-        {
-          faultInfo.pVendorInfos = reinterpret_cast(
-            std::malloc( faultCounts.vendorInfoCount * sizeof( VULKAN_HPP_NAMESPACE::DeviceFaultVendorInfoEXT ) ) );
-        }
-        std::free( faultInfo.pVendorBinaryData );
-        if ( faultCounts.vendorBinarySize )
-        {
-          faultInfo.pVendorBinaryData = std::malloc( faultCounts.vendorBinarySize );
-        }
-        result = static_cast( d.vkGetDeviceFaultInfoEXT(
-          m_device, reinterpret_cast( &faultCounts ), reinterpret_cast( &faultInfo ) ) );
-      }
-    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck(
-      result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFaultInfoEXT", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete } );
-
-    return createResultValueType( result, std::move( data_ ) );
-  }
-#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
-
 #if defined( VK_USE_PLATFORM_WIN32_KHR )
   //=== VK_NV_acquire_winrt_display ===
@@ -22637,9 +22597,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
      d.vkAcquireWinrtDisplayNV( m_physicalDevice, static_cast( display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireWinrtDisplayNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireWinrtDisplayNV" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -22665,9 +22625,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DisplayKHR display;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetWinrtDisplayNV( m_physicalDevice, deviceRelativeId, reinterpret_cast( &display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getWinrtDisplayNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getWinrtDisplayNV" );
-    return createResultValueType( result, std::move( display ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( display ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -22683,10 +22643,10 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DisplayKHR display;
     VULKAN_HPP_NAMESPACE::Result result = static_cast(
       d.vkGetWinrtDisplayNV( m_physicalDevice, deviceRelativeId, reinterpret_cast( &display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getWinrtDisplayNVUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getWinrtDisplayNVUnique" );
-    return createResultValueType( result,
-                                  UniqueHandle( display, ObjectRelease( *this, d ) ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
+      result, UniqueHandle( display, ObjectRelease( *this, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -22726,9 +22686,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXT" );
-    return createResultValueType( result, std::move( surface ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -22749,9 +22709,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXTUnique" );
result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXTUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXTUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -22848,9 +22808,9 @@ namespace VULKAN_HPP_NAMESPACE zx_handle_t zirconHandle; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryZirconHandleFUCHSIA( m_device, reinterpret_cast( &getZirconHandleInfo ), &zirconHandle ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandleFUCHSIA" ); - return createResultValueType( result, std::move( zirconHandle ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( zirconHandle ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -22888,9 +22848,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( handleType ), zirconHandle, reinterpret_cast( &memoryZirconHandleProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandlePropertiesFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandlePropertiesFUCHSIA" ); - return createResultValueType( result, std::move( memoryZirconHandleProperties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( memoryZirconHandleProperties ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ #endif /*VK_USE_PLATFORM_FUCHSIA*/ @@ -22920,9 +22880,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkImportSemaphoreZirconHandleFUCHSIA( m_device, reinterpret_cast( &importSemaphoreZirconHandleInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreZirconHandleFUCHSIA" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -22950,9 +22910,9 @@ namespace VULKAN_HPP_NAMESPACE zx_handle_t zirconHandle; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetSemaphoreZirconHandleFUCHSIA( m_device, reinterpret_cast( &getZirconHandleInfo ), &zirconHandle ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreZirconHandleFUCHSIA" ); - return createResultValueType( result, std::move( zirconHandle ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( zirconHandle ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ #endif /*VK_USE_PLATFORM_FUCHSIA*/ @@ -22992,9 +22952,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &collection ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferCollectionFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferCollectionFUCHSIA" ); - return createResultValueType( result, std::move( collection 
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( collection ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -23015,9 +22975,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &collection ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferCollectionFUCHSIAUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferCollectionFUCHSIAUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( collection, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -23049,9 +23009,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetBufferCollectionImageConstraintsFUCHSIA(
       m_device, static_cast( collection ), reinterpret_cast( &imageConstraintsInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setBufferCollectionImageConstraintsFUCHSIA" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setBufferCollectionImageConstraintsFUCHSIA" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -23081,9 +23041,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetBufferCollectionBufferConstraintsFUCHSIA(
       m_device, static_cast( collection ), reinterpret_cast( &bufferConstraintsInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setBufferCollectionBufferConstraintsFUCHSIA" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setBufferCollectionBufferConstraintsFUCHSIA" );
-    return createResultValueType( result );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -23168,9 +23128,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::BufferCollectionPropertiesFUCHSIA properties;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetBufferCollectionPropertiesFUCHSIA(
       m_device, static_cast( collection ), reinterpret_cast( &properties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferCollectionPropertiesFUCHSIA" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferCollectionPropertiesFUCHSIA" );
-    return createResultValueType( result, std::move( properties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) );
   }
 # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
 #endif /*VK_USE_PLATFORM_FUCHSIA*/
@@ -23201,9 +23161,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Extent2D maxWorkgroupSize;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI(
       m_device, static_cast( renderpass ), reinterpret_cast( &maxWorkgroupSize ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSubpassShadingMaxWorkgroupSizeHUAWEI" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSubpassShadingMaxWorkgroupSizeHUAWEI" );
-    return createResultValueType( result, std::move( maxWorkgroupSize ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( maxWorkgroupSize ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -23251,9 +23211,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::RemoteAddressNV address;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetMemoryRemoteAddressNV(
       m_device, reinterpret_cast( &memoryGetRemoteAddressInfo ), reinterpret_cast( &address ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryRemoteAddressNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryRemoteAddressNV" );
-    return createResultValueType( result, std::move( address ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( address ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -23282,9 +23242,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::BaseOutStructure pipelineProperties;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetPipelinePropertiesEXT(
       m_device, reinterpret_cast( &pipelineInfo ), reinterpret_cast( &pipelineProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelinePropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelinePropertiesEXT" );
-    return createResultValueType( result, std::move( pipelineProperties ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pipelineProperties ) );
   }
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
@@ -23361,9 +23321,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createScreenSurfaceQNX" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createScreenSurfaceQNX" );
-    return createResultValueType( result, std::move( surface ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( surface ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -23384,9 +23344,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &surface ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createScreenSurfaceQNXUnique" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::createScreenSurfaceQNXUnique" );
-    return createResultValueType(
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType(
       result, UniqueHandle( surface, ObjectDestroy( *this, allocator, d ) ) );
   }
 # endif /* VULKAN_HPP_NO_SMART_HANDLE */
@@ -23560,9 +23520,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &micromap ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createMicromapEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createMicromapEXT" );
-    return createResultValueType( result, std::move( micromap ) );
+    return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( micromap ) );
   }
 # ifndef VULKAN_HPP_NO_SMART_HANDLE
@@ -23583,9 +23543,9 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( &createInfo ),
       reinterpret_cast( static_cast( allocator ) ),
       reinterpret_cast( &micromap ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createMicromapEXTUnique" );
"::Device::createMicromapEXTUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( micromap, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -23691,7 +23651,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkBuildMicromapsEXT( m_device, static_cast( deferredOperation ), infos.size(), reinterpret_cast( infos.data() ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::buildMicromapsEXT", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); @@ -23723,7 +23683,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCopyMicromapEXT( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapEXT", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); @@ -23754,7 +23714,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCopyMicromapToMemoryEXT( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapToMemoryEXT", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); @@ -23785,7 +23745,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkCopyMemoryToMicromapEXT( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToMicromapEXT", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); @@ -23832,9 +23792,9 @@ namespace VULKAN_HPP_NAMESPACE data.size() * sizeof( DataType ), reinterpret_cast( data.data() ), stride ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertiesEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertiesEXT" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template @@ -23858,9 +23818,9 @@ namespace VULKAN_HPP_NAMESPACE sizeof( DataType ), reinterpret_cast( &data ), stride ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertyEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertyEXT" ); - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -24963,13 +24923,13 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( imageFormatProperties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING 
"::PhysicalDevice::getOpticalFlowImageFormatsNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getOpticalFlowImageFormatsNV" ); VULKAN_HPP_ASSERT( formatCount <= imageFormatProperties.size() ); if ( formatCount < imageFormatProperties.size() ) { imageFormatProperties.resize( formatCount ); } - return createResultValueType( result, std::move( imageFormatProperties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( imageFormatProperties ) ); } template ( imageFormatProperties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getOpticalFlowImageFormatsNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getOpticalFlowImageFormatsNV" ); VULKAN_HPP_ASSERT( formatCount <= imageFormatProperties.size() ); if ( formatCount < imageFormatProperties.size() ) { imageFormatProperties.resize( formatCount ); } - return createResultValueType( result, std::move( imageFormatProperties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( imageFormatProperties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -25048,9 +25008,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &session ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createOpticalFlowSessionNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createOpticalFlowSessionNV" ); - return createResultValueType( result, std::move( session ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( session ) ); } # ifndef VULKAN_HPP_NO_SMART_HANDLE @@ -25071,9 +25031,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &session ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createOpticalFlowSessionNVUnique" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::createOpticalFlowSessionNVUnique" ); - return createResultValueType( + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, UniqueHandle( session, ObjectDestroy( *this, allocator, d ) ) ); } # endif /* VULKAN_HPP_NO_SMART_HANDLE */ @@ -25168,9 +25128,9 @@ namespace VULKAN_HPP_NAMESPACE static_cast( bindingPoint ), static_cast( view ), static_cast( layout ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindOpticalFlowSessionImageNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindOpticalFlowSessionImageNV" ); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ @@ -25346,6 +25306,28 @@ namespace VULKAN_HPP_NAMESPACE } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_AMD_anti_lag === + + template + VULKAN_HPP_INLINE void Device::antiLagUpdateAMD( const VULKAN_HPP_NAMESPACE::AntiLagDataAMD * pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkAntiLagUpdateAMD( m_device, reinterpret_cast( pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::antiLagUpdateAMD( const 
VULKAN_HPP_NAMESPACE::AntiLagDataAMD & data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkAntiLagUpdateAMD && "Function requires " ); +# endif + + d.vkAntiLagUpdateAMD( m_device, reinterpret_cast( &data ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_EXT_shader_object === template @@ -25382,9 +25364,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( shaders.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); return ResultValue>( result, std::move( shaders ) ); } @@ -25410,9 +25392,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( shaders.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); return ResultValue>( result, std::move( shaders ) ); } @@ -25435,9 +25417,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shader ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); return ResultValue( result, std::move( shader ) ); } @@ -25461,9 +25443,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( shaders.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXTUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXTUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); std::vector, ShaderEXTAllocator> uniqueShaders; uniqueShaders.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -25496,9 +25478,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( shaders.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXTUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + 
VULKAN_HPP_NAMESPACE_STRING "::Device::createShadersEXTUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); std::vector, ShaderEXTAllocator> uniqueShaders( shaderEXTAllocator ); uniqueShaders.reserve( createInfos.size() ); ObjectDestroy deleter( *this, allocator, d ); @@ -25527,9 +25509,9 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shader ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderEXTUnique", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderEXTUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncompatibleShaderBinaryEXT } ); return ResultValue>( result, UniqueHandle( shader, ObjectDestroy( *this, allocator, d ) ) ); @@ -25620,13 +25602,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetShaderBinaryDataEXT( m_device, static_cast( shader ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderBinaryDataEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderBinaryDataEXT" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize < data.size() ) { data.resize( dataSize ); } - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } template ( shader ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderBinaryDataEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getShaderBinaryDataEXT" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize < data.size() ) { data.resize( dataSize ); } - return createResultValueType( result, std::move( data ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -25700,6 +25682,469 @@ namespace VULKAN_HPP_NAMESPACE } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_KHR_pipeline_binary === + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR * pCreateInfo, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR * pBinaries, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkCreatePipelineBinariesKHR( m_device, + reinterpret_cast( pCreateInfo ), + reinterpret_cast( pAllocator ), + reinterpret_cast( pBinaries ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> + Device::createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCreatePipelineBinariesKHR && "Function 
requires " ); +# endif + + std::vector pipelineBinaries; + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR binaries; + VULKAN_HPP_NAMESPACE::Result result; + if ( createInfo.pKeysAndDataInfo ) + { + VULKAN_HPP_ASSERT( !createInfo.pipeline && !createInfo.pPipelineCreateInfo ); + pipelineBinaries.resize( createInfo.pKeysAndDataInfo->binaryCount ); + binaries.pipelineBinaryCount = createInfo.pKeysAndDataInfo->binaryCount; + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + else + { + VULKAN_HPP_ASSERT( !createInfo.pipeline ^ !createInfo.pPipelineCreateInfo ); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaries.resize( binaries.pipelineBinaryCount ); + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( + result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineBinariesKHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete, VULKAN_HPP_NAMESPACE::Result::ePipelineBinaryMissingKHR } ); + + return ResultValue>( result, std::move( pipelineBinaries ) ); + } + + template ::value, int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> + Device::createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + PipelineBinaryKHRAllocator & pipelineBinaryKHRAllocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCreatePipelineBinariesKHR && "Function requires " ); +# endif + + std::vector pipelineBinaries( pipelineBinaryKHRAllocator ); + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR binaries; + VULKAN_HPP_NAMESPACE::Result result; + if ( createInfo.pKeysAndDataInfo ) + { + VULKAN_HPP_ASSERT( !createInfo.pipeline && !createInfo.pPipelineCreateInfo ); + pipelineBinaries.resize( createInfo.pKeysAndDataInfo->binaryCount ); + binaries.pipelineBinaryCount = createInfo.pKeysAndDataInfo->binaryCount; + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + else + { + VULKAN_HPP_ASSERT( !createInfo.pipeline ^ !createInfo.pPipelineCreateInfo ); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaries.resize( binaries.pipelineBinaryCount ); + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + } + + 
VULKAN_HPP_NAMESPACE::detail::resultCheck( + result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineBinariesKHR", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete, VULKAN_HPP_NAMESPACE::Result::ePipelineBinaryMissingKHR } ); + + return ResultValue>( result, std::move( pipelineBinaries ) ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineBinaryKHRAllocator>> + Device::createPipelineBinariesKHRUnique( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCreatePipelineBinariesKHR && "Function requires " ); +# endif + + std::vector pipelineBinaries; + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR binaries; + VULKAN_HPP_NAMESPACE::Result result; + if ( createInfo.pKeysAndDataInfo ) + { + VULKAN_HPP_ASSERT( !createInfo.pipeline && !createInfo.pPipelineCreateInfo ); + pipelineBinaries.resize( createInfo.pKeysAndDataInfo->binaryCount ); + binaries.pipelineBinaryCount = createInfo.pKeysAndDataInfo->binaryCount; + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + else + { + VULKAN_HPP_ASSERT( !createInfo.pipeline ^ !createInfo.pPipelineCreateInfo ); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaries.resize( binaries.pipelineBinaryCount ); + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( + result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineBinariesKHRUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete, VULKAN_HPP_NAMESPACE::Result::ePipelineBinaryMissingKHR } ); + std::vector, PipelineBinaryKHRAllocator> uniquePipelineBinaries; + uniquePipelineBinaries.reserve( pipelineBinaries.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( auto const & pipelineBinary : pipelineBinaries ) + { + uniquePipelineBinaries.push_back( UniqueHandle( pipelineBinary, deleter ) ); + } + return ResultValue, PipelineBinaryKHRAllocator>>( + result, std::move( uniquePipelineBinaries ) ); + } + + template >::value, + int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineBinaryKHRAllocator>> + Device::createPipelineBinariesKHRUnique( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + PipelineBinaryKHRAllocator & pipelineBinaryKHRAllocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCreatePipelineBinariesKHR && "Function requires " ); +# endif + + std::vector pipelineBinaries; + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR binaries; + VULKAN_HPP_NAMESPACE::Result result; + if ( 
createInfo.pKeysAndDataInfo ) + { + VULKAN_HPP_ASSERT( !createInfo.pipeline && !createInfo.pPipelineCreateInfo ); + pipelineBinaries.resize( createInfo.pKeysAndDataInfo->binaryCount ); + binaries.pipelineBinaryCount = createInfo.pKeysAndDataInfo->binaryCount; + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + else + { + VULKAN_HPP_ASSERT( !createInfo.pipeline ^ !createInfo.pPipelineCreateInfo ); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaries.resize( binaries.pipelineBinaryCount ); + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( d.vkCreatePipelineBinariesKHR( + m_device, + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( + result, + VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineBinariesKHRUnique", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete, VULKAN_HPP_NAMESPACE::Result::ePipelineBinaryMissingKHR } ); + std::vector, PipelineBinaryKHRAllocator> uniquePipelineBinaries( + pipelineBinaryKHRAllocator ); + uniquePipelineBinaries.reserve( pipelineBinaries.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( auto const & pipelineBinary : pipelineBinaries ) + { + uniquePipelineBinaries.push_back( UniqueHandle( pipelineBinary, deleter ) ); + } + return ResultValue, PipelineBinaryKHRAllocator>>( + result, std::move( uniquePipelineBinaries ) ); + } +# endif /* VULKAN_HPP_NO_SMART_HANDLE */ +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void Device::destroyPipelineBinaryKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkDestroyPipelineBinaryKHR( m_device, static_cast( pipelineBinary ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipelineBinaryKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + Optional allocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkDestroyPipelineBinaryKHR && "Function requires " ); +# endif + + d.vkDestroyPipelineBinaryKHR( + m_device, + static_cast( pipelineBinary ), + reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkDestroyPipelineBinaryKHR( m_device, static_cast( pipelineBinary ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( 
VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + Optional allocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkDestroyPipelineBinaryKHR && "Function requires " ); +# endif + + d.vkDestroyPipelineBinaryKHR( + m_device, + static_cast( pipelineBinary ), + reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineKeyKHR( const VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR * pPipelineCreateInfo, + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineKey, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkGetPipelineKeyKHR( + m_device, reinterpret_cast( pPipelineCreateInfo ), reinterpret_cast( pPipelineKey ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType::type + Device::getPipelineKeyKHR( Optional pipelineCreateInfo, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetPipelineKeyKHR && "Function requires " ); +# endif + + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR pipelineKey; + VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetPipelineKeyKHR( + m_device, + reinterpret_cast( static_cast( pipelineCreateInfo ) ), + reinterpret_cast( &pipelineKey ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineKeyKHR" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pipelineKey ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR * pInfo, + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineBinaryKey, + size_t * pPipelineBinaryDataSize, + void * pPipelineBinaryData, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkGetPipelineBinaryDataKHR( m_device, + reinterpret_cast( pInfo ), + reinterpret_cast( pPipelineBinaryKey ), + pPipelineBinaryDataSize, + pPipelineBinaryData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE + typename ResultValueType>>::type + Device::getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetPipelineBinaryDataKHR && "Function requires " ); +# endif + + std::pair> data_; + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR & pipelineBinaryKey = data_.first; + std::vector & pipelineBinaryData = data_.second; + size_t pipelineBinaryDataSize; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( d.vkGetPipelineBinaryDataKHR( m_device, + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + nullptr ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaryData.resize( pipelineBinaryDataSize ); + result = static_cast( d.vkGetPipelineBinaryDataKHR( 
m_device, + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + reinterpret_cast( pipelineBinaryData.data() ) ) ); + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineBinaryDataKHR" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); + } + + template ::value, int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE + typename ResultValueType>>::type + Device::getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info, + Uint8_tAllocator & uint8_tAllocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetPipelineBinaryDataKHR && "Function requires " ); +# endif + + std::pair> data_( + std::piecewise_construct, std::forward_as_tuple(), std::forward_as_tuple( uint8_tAllocator ) ); + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR & pipelineBinaryKey = data_.first; + std::vector & pipelineBinaryData = data_.second; + size_t pipelineBinaryDataSize; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( d.vkGetPipelineBinaryDataKHR( m_device, + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + nullptr ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaryData.resize( pipelineBinaryDataSize ); + result = static_cast( d.vkGetPipelineBinaryDataKHR( m_device, + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + reinterpret_cast( pipelineBinaryData.data() ) ) ); + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineBinaryDataKHR" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE Result Device::releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR * pInfo, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkReleaseCapturedPipelineDataKHR( + m_device, reinterpret_cast( pInfo ), reinterpret_cast( pAllocator ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR & info, + Optional allocator, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkReleaseCapturedPipelineDataKHR && "Function requires " ); +# endif + + d.vkReleaseCapturedPipelineDataKHR( + m_device, + reinterpret_cast( &info ), + reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_QCOM_tile_properties === template @@ -25836,9 +26281,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkSetLatencySleepModeNV( m_device, static_cast( swapchain ), reinterpret_cast( &sleepModeInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setLatencySleepModeNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setLatencySleepModeNV" 
); - return createResultValueType( result ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -25901,19 +26346,48 @@ namespace VULKAN_HPP_NAMESPACE } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV - Device::getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector + Device::getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkGetLatencyTimingsNV && "Function requires " ); # endif - VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV latencyMarkerInfo; + std::vector timings; + VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV latencyMarkerInfo; + d.vkGetLatencyTimingsNV( m_device, static_cast( swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); + timings.resize( latencyMarkerInfo.timingCount ); + latencyMarkerInfo.pTimings = timings.data(); d.vkGetLatencyTimingsNV( m_device, static_cast( swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); - return latencyMarkerInfo; + return timings; + } + + template < + typename LatencyTimingsFrameReportNVAllocator, + typename Dispatch, + typename std::enable_if::value, + int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector + Device::getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, + LatencyTimingsFrameReportNVAllocator & latencyTimingsFrameReportNVAllocator, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetLatencyTimingsNV && "Function requires " ); +# endif + + std::vector timings( latencyTimingsFrameReportNVAllocator ); + VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV latencyMarkerInfo; + d.vkGetLatencyTimingsNV( m_device, static_cast( swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); + timings.resize( latencyMarkerInfo.timingCount ); + latencyMarkerInfo.pTimings = timings.data(); + d.vkGetLatencyTimingsNV( m_device, static_cast( swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); + + return timings; } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -25975,13 +26449,13 @@ namespace VULKAN_HPP_NAMESPACE m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template ( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); 
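// A hedged aside, not part of the vendored header: every enumeration wrapper
// touched by this update (getOpticalFlowImageFormatsNV, getShaderBinaryDataEXT,
// getCooperativeMatrixPropertiesKHR, getCalibrateableTimeDomainsKHR, ...)
// implements the same two-phase query loop seen above. A minimal standalone
// equivalent against the C API, assuming a valid `physicalDevice` handle:
//
//   #include <vector>
//   #include <vulkan/vulkan.h>
//
//   std::vector<VkCooperativeMatrixPropertiesKHR> properties;
//   uint32_t propertyCount = 0;
//   VkResult result;
//   do
//   {
//     // First call: query only the element count (pProperties == nullptr).
//     result = vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR( physicalDevice, &propertyCount, nullptr );
//     if ( ( result == VK_SUCCESS ) && ( propertyCount > 0 ) )
//     {
//       // Second call: fill a buffer sized from the count; the driver may
//       // report VK_INCOMPLETE if the count grew in between, hence the loop.
//       // Each element's sType is set via the fill value passed to resize().
//       properties.resize( propertyCount, { VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR } );
//       result = vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR( physicalDevice, &propertyCount, properties.data() );
//     }
//   } while ( result == VK_INCOMPLETE );
//   properties.resize( propertyCount );  // shrink if fewer entries were written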
VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { properties.resize( propertyCount ); } - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -26059,9 +26533,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ScreenBufferPropertiesQNX properties; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetScreenBufferPropertiesQNX( m_device, &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); - return createResultValueType( result, std::move( properties ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( properties ) ); } template @@ -26077,9 +26551,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ScreenBufferPropertiesQNX & properties = structureChain.template get(); VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetScreenBufferPropertiesQNX( m_device, &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); - return createResultValueType( result, std::move( structureChain ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( structureChain ) ); } # endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ #endif /*VK_USE_PLATFORM_SCREEN_QNX*/ @@ -26130,13 +26604,13 @@ namespace VULKAN_HPP_NAMESPACE d.vkGetPhysicalDeviceCalibrateableTimeDomainsKHR( m_physicalDevice, &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); if ( timeDomainCount < timeDomains.size() ) { timeDomains.resize( timeDomainCount ); } - return createResultValueType( result, std::move( timeDomains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( timeDomains ) ); } template ( timeDomains.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); if ( timeDomainCount < timeDomains.size() ) { timeDomains.resize( timeDomainCount ); } - return createResultValueType( result, std::move( timeDomains ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( timeDomains ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -26204,9 +26678,9 @@ namespace VULKAN_HPP_NAMESPACE uint64_t & maxDeviation = data_.second; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetCalibratedTimestampsKHR( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), 
timestamps.data(), &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } template ( d.vkGetCalibratedTimestampsKHR( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } template @@ -26249,9 +26723,9 @@ namespace VULKAN_HPP_NAMESPACE uint64_t & maxDeviation = data_.second; VULKAN_HPP_NAMESPACE::Result result = static_cast( d.vkGetCalibratedTimestampsKHR( m_device, 1, reinterpret_cast( &timestampInfo ), &timestamp, &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampKHR" ); - return createResultValueType( result, std::move( data_ ) ); + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( data_ ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ diff --git a/third_party/vulkan/vulkan_handles.hpp b/third_party/vulkan/vulkan_handles.hpp index 4208b8c..4670c46 100644 --- a/third_party/vulkan/vulkan_handles.hpp +++ b/third_party/vulkan/vulkan_handles.hpp @@ -8,6 +8,9 @@ #ifndef VULKAN_HANDLES_HPP #define VULKAN_HANDLES_HPP +// include-what-you-use: make sure, vulkan.hpp is used by code-completers +// IWYU pragma: private; include "vulkan.hpp" + namespace VULKAN_HPP_NAMESPACE { @@ -997,9 +1000,6 @@ namespace VULKAN_HPP_NAMESPACE struct PresentFrameTokenGGP; #endif /*VK_USE_PLATFORM_GGP*/ - //=== VK_NV_compute_shader_derivatives === - struct PhysicalDeviceComputeShaderDerivativesFeaturesNV; - //=== VK_NV_mesh_shader === struct PhysicalDeviceMeshShaderFeaturesNV; struct PhysicalDeviceMeshShaderPropertiesNV; @@ -1660,6 +1660,11 @@ namespace VULKAN_HPP_NAMESPACE struct PipelineCreateFlags2CreateInfoKHR; struct BufferUsageFlags2CreateInfoKHR; + //=== VK_AMD_anti_lag === + struct PhysicalDeviceAntiLagFeaturesAMD; + struct AntiLagDataAMD; + struct AntiLagPresentationInfoAMD; + //=== VK_KHR_ray_tracing_position_fetch === struct PhysicalDeviceRayTracingPositionFetchFeaturesKHR; @@ -1668,6 +1673,20 @@ namespace VULKAN_HPP_NAMESPACE struct PhysicalDeviceShaderObjectPropertiesEXT; struct ShaderCreateInfoEXT; + //=== VK_KHR_pipeline_binary === + struct PhysicalDevicePipelineBinaryFeaturesKHR; + struct PhysicalDevicePipelineBinaryPropertiesKHR; + struct DevicePipelineBinaryInternalCacheControlKHR; + struct PipelineBinaryKeyKHR; + struct PipelineBinaryDataKHR; + struct PipelineBinaryKeysAndDataKHR; + struct PipelineBinaryCreateInfoKHR; + struct PipelineBinaryInfoKHR; + struct ReleaseCapturedPipelineDataInfoKHR; + struct PipelineBinaryDataInfoKHR; + struct PipelineCreateInfoKHR; + struct PipelineBinaryHandlesInfoKHR; + //=== VK_QCOM_tile_properties === struct PhysicalDeviceTilePropertiesFeaturesQCOM; struct TilePropertiesQCOM; @@ -1695,6 +1714,10 @@ namespace
VULKAN_HPP_NAMESPACE struct MutableDescriptorTypeCreateInfoEXT; using MutableDescriptorTypeCreateInfoVALVE = MutableDescriptorTypeCreateInfoEXT; + //=== VK_EXT_legacy_vertex_attributes === + struct PhysicalDeviceLegacyVertexAttributesFeaturesEXT; + struct PhysicalDeviceLegacyVertexAttributesPropertiesEXT; + //=== VK_EXT_layer_settings === struct LayerSettingsCreateInfoEXT; struct LayerSettingEXT; @@ -1729,6 +1752,11 @@ namespace VULKAN_HPP_NAMESPACE struct PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM; struct MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM; + //=== VK_KHR_compute_shader_derivatives === + struct PhysicalDeviceComputeShaderDerivativesFeaturesKHR; + using PhysicalDeviceComputeShaderDerivativesFeaturesNV = PhysicalDeviceComputeShaderDerivativesFeaturesKHR; + struct PhysicalDeviceComputeShaderDerivativesPropertiesKHR; + //=== VK_KHR_video_decode_av1 === struct VideoDecodeAV1ProfileInfoKHR; struct VideoDecodeAV1CapabilitiesKHR; @@ -1823,12 +1851,33 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_NV_raw_access_chains === struct PhysicalDeviceRawAccessChainsFeaturesNV; + //=== VK_KHR_shader_relaxed_extended_instruction === + struct PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR; + + //=== VK_NV_command_buffer_inheritance === + struct PhysicalDeviceCommandBufferInheritanceFeaturesNV; + + //=== VK_KHR_maintenance7 === + struct PhysicalDeviceMaintenance7FeaturesKHR; + struct PhysicalDeviceMaintenance7PropertiesKHR; + struct PhysicalDeviceLayeredApiPropertiesListKHR; + struct PhysicalDeviceLayeredApiPropertiesKHR; + struct PhysicalDeviceLayeredApiVulkanPropertiesKHR; + //=== VK_NV_shader_atomic_float16_vector === struct PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV; + //=== VK_EXT_shader_replicated_composites === + struct PhysicalDeviceShaderReplicatedCompositesFeaturesEXT; + //=== VK_NV_ray_tracing_validation === struct PhysicalDeviceRayTracingValidationFeaturesNV; + //=== VK_MESA_image_alignment_control === + struct PhysicalDeviceImageAlignmentControlFeaturesMESA; + struct PhysicalDeviceImageAlignmentControlPropertiesMESA; + struct ImageAlignmentControlCreateInfoMESA; + //=================================== //=== HANDLE forward declarations === //=================================== @@ -1929,6 +1978,9 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_shader_object === class ShaderEXT; + //=== VK_KHR_pipeline_binary === + class PipelineBinaryKHR; + #ifndef VULKAN_HPP_NO_SMART_HANDLE //====================== //=== UNIQUE HANDLEs === @@ -2384,6 +2436,16 @@ namespace VULKAN_HPP_NAMESPACE }; using UniqueShaderEXT = UniqueHandle; + + //=== VK_KHR_pipeline_binary === + template + class UniqueHandleTraits + { + public: + using deleter = ObjectDestroy; + }; + + using UniquePipelineBinaryKHR = UniqueHandle; #endif /*VULKAN_HPP_NO_SMART_HANDLE*/ //=============== @@ -2407,7 +2469,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSurfaceKHR; public: - VULKAN_HPP_CONSTEXPR SurfaceKHR() = default; + SurfaceKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + SurfaceKHR( SurfaceKHR const & rhs ) = default; + SurfaceKHR & operator=( SurfaceKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + SurfaceKHR( SurfaceKHR && rhs ) = default; + SurfaceKHR & operator=( SurfaceKHR && rhs ) = default; +#else + SurfaceKHR( SurfaceKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_surfaceKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_surfaceKHR, {} ) ) {} + + SurfaceKHR & operator=( SurfaceKHR && rhs ) 
VULKAN_HPP_NOEXCEPT + { + m_surfaceKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_surfaceKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR SurfaceKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2477,11 +2554,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::SurfaceKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::SurfaceKHR; }; +#endif template <> struct isVulkanHandleType @@ -2500,7 +2579,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDebugReportCallbackEXT; public: - VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT() = default; + DebugReportCallbackEXT() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DebugReportCallbackEXT( DebugReportCallbackEXT const & rhs ) = default; + DebugReportCallbackEXT & operator=( DebugReportCallbackEXT const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DebugReportCallbackEXT( DebugReportCallbackEXT && rhs ) = default; + DebugReportCallbackEXT & operator=( DebugReportCallbackEXT && rhs ) = default; +#else + DebugReportCallbackEXT( DebugReportCallbackEXT && rhs ) VULKAN_HPP_NOEXCEPT + : m_debugReportCallbackEXT( VULKAN_HPP_NAMESPACE::exchange( rhs.m_debugReportCallbackEXT, {} ) ) + { + } + + DebugReportCallbackEXT & operator=( DebugReportCallbackEXT && rhs ) VULKAN_HPP_NOEXCEPT + { + m_debugReportCallbackEXT = VULKAN_HPP_NAMESPACE::exchange( rhs.m_debugReportCallbackEXT, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2573,11 +2670,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT; }; +#endif template <> struct isVulkanHandleType @@ -2596,7 +2695,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT() = default; + DebugUtilsMessengerEXT() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DebugUtilsMessengerEXT( DebugUtilsMessengerEXT const & rhs ) = default; + DebugUtilsMessengerEXT & operator=( DebugUtilsMessengerEXT const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DebugUtilsMessengerEXT( DebugUtilsMessengerEXT && rhs ) = default; + DebugUtilsMessengerEXT & operator=( DebugUtilsMessengerEXT && rhs ) = default; +#else + DebugUtilsMessengerEXT( DebugUtilsMessengerEXT && rhs ) VULKAN_HPP_NOEXCEPT + : m_debugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::exchange( rhs.m_debugUtilsMessengerEXT, {} ) ) + { + } + + DebugUtilsMessengerEXT & operator=( DebugUtilsMessengerEXT && rhs ) VULKAN_HPP_NOEXCEPT + { + m_debugUtilsMessengerEXT = VULKAN_HPP_NAMESPACE::exchange( rhs.m_debugUtilsMessengerEXT, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2663,11 +2780,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT; }; +#endif template <> struct isVulkanHandleType @@ -2686,7 +2805,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayKHR; public: - VULKAN_HPP_CONSTEXPR DisplayKHR() = default; + DisplayKHR() 
VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DisplayKHR( DisplayKHR const & rhs ) = default; + DisplayKHR & operator=( DisplayKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DisplayKHR( DisplayKHR && rhs ) = default; + DisplayKHR & operator=( DisplayKHR && rhs ) = default; +#else + DisplayKHR( DisplayKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_displayKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_displayKHR, {} ) ) {} + + DisplayKHR & operator=( DisplayKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_displayKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_displayKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DisplayKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2756,11 +2890,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DisplayKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DisplayKHR; }; +#endif template <> struct isVulkanHandleType @@ -2779,7 +2915,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSwapchainKHR; public: - VULKAN_HPP_CONSTEXPR SwapchainKHR() = default; + SwapchainKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + SwapchainKHR( SwapchainKHR const & rhs ) = default; + SwapchainKHR & operator=( SwapchainKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + SwapchainKHR( SwapchainKHR && rhs ) = default; + SwapchainKHR & operator=( SwapchainKHR && rhs ) = default; +#else + SwapchainKHR( SwapchainKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_swapchainKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_swapchainKHR, {} ) ) {} + + SwapchainKHR & operator=( SwapchainKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_swapchainKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_swapchainKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR SwapchainKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2849,11 +3000,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::SwapchainKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::SwapchainKHR; }; +#endif template <> struct isVulkanHandleType @@ -2872,7 +3025,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSemaphore; public: - VULKAN_HPP_CONSTEXPR Semaphore() = default; + Semaphore() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Semaphore( Semaphore const & rhs ) = default; + Semaphore & operator=( Semaphore const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Semaphore( Semaphore && rhs ) = default; + Semaphore & operator=( Semaphore && rhs ) = default; +#else + Semaphore( Semaphore && rhs ) VULKAN_HPP_NOEXCEPT : m_semaphore( VULKAN_HPP_NAMESPACE::exchange( rhs.m_semaphore, {} ) ) {} + + Semaphore & operator=( Semaphore && rhs ) VULKAN_HPP_NOEXCEPT + { + m_semaphore = VULKAN_HPP_NAMESPACE::exchange( rhs.m_semaphore, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Semaphore( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -2942,11 +3110,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Semaphore; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Semaphore; }; +#endif template <> struct isVulkanHandleType @@ -2965,7 +3135,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFence; public: - VULKAN_HPP_CONSTEXPR Fence() = default; + Fence() 
VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Fence( Fence const & rhs ) = default; + Fence & operator=( Fence const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Fence( Fence && rhs ) = default; + Fence & operator=( Fence && rhs ) = default; +#else + Fence( Fence && rhs ) VULKAN_HPP_NOEXCEPT : m_fence( VULKAN_HPP_NAMESPACE::exchange( rhs.m_fence, {} ) ) {} + + Fence & operator=( Fence && rhs ) VULKAN_HPP_NOEXCEPT + { + m_fence = VULKAN_HPP_NAMESPACE::exchange( rhs.m_fence, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Fence( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3035,11 +3220,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Fence; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Fence; }; +#endif template <> struct isVulkanHandleType @@ -3058,7 +3245,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR PerformanceConfigurationINTEL() = default; + PerformanceConfigurationINTEL() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + PerformanceConfigurationINTEL( PerformanceConfigurationINTEL const & rhs ) = default; + PerformanceConfigurationINTEL & operator=( PerformanceConfigurationINTEL const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + PerformanceConfigurationINTEL( PerformanceConfigurationINTEL && rhs ) = default; + PerformanceConfigurationINTEL & operator=( PerformanceConfigurationINTEL && rhs ) = default; +#else + PerformanceConfigurationINTEL( PerformanceConfigurationINTEL && rhs ) VULKAN_HPP_NOEXCEPT + : m_performanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::exchange( rhs.m_performanceConfigurationINTEL, {} ) ) + { + } + + PerformanceConfigurationINTEL & operator=( PerformanceConfigurationINTEL && rhs ) VULKAN_HPP_NOEXCEPT + { + m_performanceConfigurationINTEL = VULKAN_HPP_NAMESPACE::exchange( rhs.m_performanceConfigurationINTEL, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR PerformanceConfigurationINTEL( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3125,11 +3330,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL; }; +#endif template <> struct isVulkanHandleType @@ -3148,7 +3355,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueryPool; public: - VULKAN_HPP_CONSTEXPR QueryPool() = default; + QueryPool() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + QueryPool( QueryPool const & rhs ) = default; + QueryPool & operator=( QueryPool const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + QueryPool( QueryPool && rhs ) = default; + QueryPool & operator=( QueryPool && rhs ) = default; +#else + QueryPool( QueryPool && rhs ) VULKAN_HPP_NOEXCEPT : m_queryPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_queryPool, {} ) ) {} + + QueryPool & operator=( QueryPool && rhs ) VULKAN_HPP_NOEXCEPT + { + m_queryPool = VULKAN_HPP_NAMESPACE::exchange( rhs.m_queryPool, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR QueryPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3218,11 +3440,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::QueryPool; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = 
VULKAN_HPP_NAMESPACE::QueryPool; }; +#endif template <> struct isVulkanHandleType @@ -3241,7 +3465,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer; public: - VULKAN_HPP_CONSTEXPR Buffer() = default; + Buffer() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Buffer( Buffer const & rhs ) = default; + Buffer & operator=( Buffer const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Buffer( Buffer && rhs ) = default; + Buffer & operator=( Buffer && rhs ) = default; +#else + Buffer( Buffer && rhs ) VULKAN_HPP_NOEXCEPT : m_buffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_buffer, {} ) ) {} + + Buffer & operator=( Buffer && rhs ) VULKAN_HPP_NOEXCEPT + { + m_buffer = VULKAN_HPP_NAMESPACE::exchange( rhs.m_buffer, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Buffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3311,11 +3550,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Buffer; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Buffer; }; +#endif template <> struct isVulkanHandleType @@ -3334,7 +3575,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineLayout; public: - VULKAN_HPP_CONSTEXPR PipelineLayout() = default; + PipelineLayout() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + PipelineLayout( PipelineLayout const & rhs ) = default; + PipelineLayout & operator=( PipelineLayout const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + PipelineLayout( PipelineLayout && rhs ) = default; + PipelineLayout & operator=( PipelineLayout && rhs ) = default; +#else + PipelineLayout( PipelineLayout && rhs ) VULKAN_HPP_NOEXCEPT : m_pipelineLayout( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ) ) {} + + PipelineLayout & operator=( PipelineLayout && rhs ) VULKAN_HPP_NOEXCEPT + { + m_pipelineLayout = VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR PipelineLayout( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3404,11 +3660,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::PipelineLayout; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::PipelineLayout; }; +#endif template <> struct isVulkanHandleType @@ -3427,7 +3685,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSet; public: - VULKAN_HPP_CONSTEXPR DescriptorSet() = default; + DescriptorSet() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DescriptorSet( DescriptorSet const & rhs ) = default; + DescriptorSet & operator=( DescriptorSet const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DescriptorSet( DescriptorSet && rhs ) = default; + DescriptorSet & operator=( DescriptorSet && rhs ) = default; +#else + DescriptorSet( DescriptorSet && rhs ) VULKAN_HPP_NOEXCEPT : m_descriptorSet( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSet, {} ) ) {} + + DescriptorSet & operator=( DescriptorSet && rhs ) VULKAN_HPP_NOEXCEPT + { + m_descriptorSet = VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSet, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DescriptorSet( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3497,11 +3770,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DescriptorSet; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) 
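// A hedged sketch of why the new guard above is needed (assumption inferred
// from vulkan_core.h conventions, not something stated in this patch): the
// CppType mapping from C handle types to C++ wrappers can only be specialised
// per handle when the C types are distinct, which is exactly the
// VK_USE_64_BIT_PTR_DEFINES == 1 case:
//
//   #if ( VK_USE_64_BIT_PTR_DEFINES == 1 )
//     // 64-bit-pointer builds: each non-dispatchable handle is its own struct pointer type.
//     #define VK_DEFINE_NON_DISPATCHABLE_HANDLE( object ) typedef struct object##_T * object;
//   #else
//     // 32-bit builds: every non-dispatchable handle collapses to uint64_t, so
//     // CppType<VkSurfaceKHR>, CppType<VkFence>, ... would all be the same
//     // specialisation and clash; hence the #if around each one.
//     #define VK_DEFINE_NON_DISPATCHABLE_HANDLE( object ) typedef uint64_t object;
//   #endif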
template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DescriptorSet; }; +#endif template <> struct isVulkanHandleType @@ -3520,7 +3795,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImageView; public: - VULKAN_HPP_CONSTEXPR ImageView() = default; + ImageView() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + ImageView( ImageView const & rhs ) = default; + ImageView & operator=( ImageView const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + ImageView( ImageView && rhs ) = default; + ImageView & operator=( ImageView && rhs ) = default; +#else + ImageView( ImageView && rhs ) VULKAN_HPP_NOEXCEPT : m_imageView( VULKAN_HPP_NAMESPACE::exchange( rhs.m_imageView, {} ) ) {} + + ImageView & operator=( ImageView && rhs ) VULKAN_HPP_NOEXCEPT + { + m_imageView = VULKAN_HPP_NAMESPACE::exchange( rhs.m_imageView, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR ImageView( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3590,11 +3880,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::ImageView; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::ImageView; }; +#endif template <> struct isVulkanHandleType @@ -3613,7 +3905,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipeline; public: - VULKAN_HPP_CONSTEXPR Pipeline() = default; + Pipeline() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Pipeline( Pipeline const & rhs ) = default; + Pipeline & operator=( Pipeline const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Pipeline( Pipeline && rhs ) = default; + Pipeline & operator=( Pipeline && rhs ) = default; +#else + Pipeline( Pipeline && rhs ) VULKAN_HPP_NOEXCEPT : m_pipeline( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipeline, {} ) ) {} + + Pipeline & operator=( Pipeline && rhs ) VULKAN_HPP_NOEXCEPT + { + m_pipeline = VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipeline, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Pipeline( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3683,11 +3990,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Pipeline; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Pipeline; }; +#endif template <> struct isVulkanHandleType @@ -3706,7 +4015,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR ShaderEXT() = default; + ShaderEXT() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + ShaderEXT( ShaderEXT const & rhs ) = default; + ShaderEXT & operator=( ShaderEXT const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + ShaderEXT( ShaderEXT && rhs ) = default; + ShaderEXT & operator=( ShaderEXT && rhs ) = default; +#else + ShaderEXT( ShaderEXT && rhs ) VULKAN_HPP_NOEXCEPT : m_shaderEXT( VULKAN_HPP_NAMESPACE::exchange( rhs.m_shaderEXT, {} ) ) {} + + ShaderEXT & operator=( ShaderEXT && rhs ) VULKAN_HPP_NOEXCEPT + { + m_shaderEXT = VULKAN_HPP_NAMESPACE::exchange( rhs.m_shaderEXT, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR ShaderEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3770,11 +4094,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::ShaderEXT; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::ShaderEXT; }; +#endif template <> struct 
isVulkanHandleType @@ -3793,7 +4119,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImage; public: - VULKAN_HPP_CONSTEXPR Image() = default; + Image() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Image( Image const & rhs ) = default; + Image & operator=( Image const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Image( Image && rhs ) = default; + Image & operator=( Image && rhs ) = default; +#else + Image( Image && rhs ) VULKAN_HPP_NOEXCEPT : m_image( VULKAN_HPP_NAMESPACE::exchange( rhs.m_image, {} ) ) {} + + Image & operator=( Image && rhs ) VULKAN_HPP_NOEXCEPT + { + m_image = VULKAN_HPP_NAMESPACE::exchange( rhs.m_image, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Image( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3863,11 +4204,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Image; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Image; }; +#endif template <> struct isVulkanHandleType @@ -3886,7 +4229,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureNV; public: - VULKAN_HPP_CONSTEXPR AccelerationStructureNV() = default; + AccelerationStructureNV() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + AccelerationStructureNV( AccelerationStructureNV const & rhs ) = default; + AccelerationStructureNV & operator=( AccelerationStructureNV const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + AccelerationStructureNV( AccelerationStructureNV && rhs ) = default; + AccelerationStructureNV & operator=( AccelerationStructureNV && rhs ) = default; +#else + AccelerationStructureNV( AccelerationStructureNV && rhs ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV( VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructureNV, {} ) ) + { + } + + AccelerationStructureNV & operator=( AccelerationStructureNV && rhs ) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureNV = VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructureNV, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR AccelerationStructureNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -3959,11 +4320,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; }; +#endif template <> struct isVulkanHandleType @@ -3982,7 +4345,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR OpticalFlowSessionNV() = default; + OpticalFlowSessionNV() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + OpticalFlowSessionNV( OpticalFlowSessionNV const & rhs ) = default; + OpticalFlowSessionNV & operator=( OpticalFlowSessionNV const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + OpticalFlowSessionNV( OpticalFlowSessionNV && rhs ) = default; + OpticalFlowSessionNV & operator=( OpticalFlowSessionNV && rhs ) = default; +#else + OpticalFlowSessionNV( OpticalFlowSessionNV && rhs ) VULKAN_HPP_NOEXCEPT + : m_opticalFlowSessionNV( VULKAN_HPP_NAMESPACE::exchange( rhs.m_opticalFlowSessionNV, {} ) ) + { + } + + OpticalFlowSessionNV & operator=( OpticalFlowSessionNV && rhs ) VULKAN_HPP_NOEXCEPT + { + m_opticalFlowSessionNV = VULKAN_HPP_NAMESPACE::exchange( rhs.m_opticalFlowSessionNV, {} ); + return 
*this; + } +#endif VULKAN_HPP_CONSTEXPR OpticalFlowSessionNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -4049,11 +4430,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV; }; +#endif template <> struct isVulkanHandleType @@ -4072,7 +4455,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorUpdateTemplate; public: - VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate() = default; + DescriptorUpdateTemplate() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DescriptorUpdateTemplate( DescriptorUpdateTemplate const & rhs ) = default; + DescriptorUpdateTemplate & operator=( DescriptorUpdateTemplate const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DescriptorUpdateTemplate( DescriptorUpdateTemplate && rhs ) = default; + DescriptorUpdateTemplate & operator=( DescriptorUpdateTemplate && rhs ) = default; +#else + DescriptorUpdateTemplate( DescriptorUpdateTemplate && rhs ) VULKAN_HPP_NOEXCEPT + : m_descriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ) ) + { + } + + DescriptorUpdateTemplate & operator=( DescriptorUpdateTemplate && rhs ) VULKAN_HPP_NOEXCEPT + { + m_descriptorUpdateTemplate = VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -4145,11 +4546,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate; }; +#endif template <> struct isVulkanHandleType @@ -4170,7 +4573,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eEvent; public: - VULKAN_HPP_CONSTEXPR Event() = default; + Event() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Event( Event const & rhs ) = default; + Event & operator=( Event const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Event( Event && rhs ) = default; + Event & operator=( Event && rhs ) = default; +#else + Event( Event && rhs ) VULKAN_HPP_NOEXCEPT : m_event( VULKAN_HPP_NAMESPACE::exchange( rhs.m_event, {} ) ) {} + + Event & operator=( Event && rhs ) VULKAN_HPP_NOEXCEPT + { + m_event = VULKAN_HPP_NAMESPACE::exchange( rhs.m_event, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Event( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -4240,11 +4658,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Event; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Event; }; +#endif template <> struct isVulkanHandleType @@ -4263,7 +4683,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureKHR; public: - VULKAN_HPP_CONSTEXPR AccelerationStructureKHR() = default; + AccelerationStructureKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + AccelerationStructureKHR( AccelerationStructureKHR const & rhs ) = default; + AccelerationStructureKHR & operator=( AccelerationStructureKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + AccelerationStructureKHR( AccelerationStructureKHR && rhs ) = default; + 
AccelerationStructureKHR & operator=( AccelerationStructureKHR && rhs ) = default; +#else + AccelerationStructureKHR( AccelerationStructureKHR && rhs ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructureKHR, {} ) ) + { + } + + AccelerationStructureKHR & operator=( AccelerationStructureKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructureKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR AccelerationStructureKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -4336,11 +4774,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureKHR; }; +#endif template <> struct isVulkanHandleType @@ -4359,7 +4799,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR MicromapEXT() = default; + MicromapEXT() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + MicromapEXT( MicromapEXT const & rhs ) = default; + MicromapEXT & operator=( MicromapEXT const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + MicromapEXT( MicromapEXT && rhs ) = default; + MicromapEXT & operator=( MicromapEXT && rhs ) = default; +#else + MicromapEXT( MicromapEXT && rhs ) VULKAN_HPP_NOEXCEPT : m_micromapEXT( VULKAN_HPP_NAMESPACE::exchange( rhs.m_micromapEXT, {} ) ) {} + + MicromapEXT & operator=( MicromapEXT && rhs ) VULKAN_HPP_NOEXCEPT + { + m_micromapEXT = VULKAN_HPP_NAMESPACE::exchange( rhs.m_micromapEXT, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR MicromapEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -4423,11 +4878,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::MicromapEXT; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::MicromapEXT; }; +#endif template <> struct isVulkanHandleType @@ -4446,7 +4903,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandBuffer; public: - VULKAN_HPP_CONSTEXPR CommandBuffer() = default; + CommandBuffer() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CommandBuffer( CommandBuffer const & rhs ) = default; + CommandBuffer & operator=( CommandBuffer const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + CommandBuffer( CommandBuffer && rhs ) = default; + CommandBuffer & operator=( CommandBuffer && rhs ) = default; +#else + CommandBuffer( CommandBuffer && rhs ) VULKAN_HPP_NOEXCEPT : m_commandBuffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandBuffer, {} ) ) {} + + CommandBuffer & operator=( CommandBuffer && rhs ) VULKAN_HPP_NOEXCEPT + { + m_commandBuffer = VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandBuffer, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR CommandBuffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -5950,11 +6422,11 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pLocationInfo, + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE template - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & locationInfo, + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -6817,11 +7289,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CommandBuffer; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CommandBuffer; }; +#endif template <> struct isVulkanHandleType @@ -6840,7 +7314,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDeviceMemory; public: - VULKAN_HPP_CONSTEXPR DeviceMemory() = default; + DeviceMemory() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DeviceMemory( DeviceMemory const & rhs ) = default; + DeviceMemory & operator=( DeviceMemory const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DeviceMemory( DeviceMemory && rhs ) = default; + DeviceMemory & operator=( DeviceMemory && rhs ) = default; +#else + DeviceMemory( DeviceMemory && rhs ) VULKAN_HPP_NOEXCEPT : m_deviceMemory( VULKAN_HPP_NAMESPACE::exchange( rhs.m_deviceMemory, {} ) ) {} + + DeviceMemory & operator=( DeviceMemory && rhs ) VULKAN_HPP_NOEXCEPT + { + m_deviceMemory = VULKAN_HPP_NAMESPACE::exchange( rhs.m_deviceMemory, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DeviceMemory( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -6863,7 +7352,7 @@ namespace VULKAN_HPP_NAMESPACE #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) auto operator<=>( DeviceMemory const & ) const = default; #else - bool operator==( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT { return m_deviceMemory == rhs.m_deviceMemory; } @@ -6910,11 +7399,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DeviceMemory; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DeviceMemory; }; +#endif template <> struct isVulkanHandleType @@ -6933,7 +7424,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR VideoSessionKHR() = default; + VideoSessionKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + VideoSessionKHR( VideoSessionKHR const & rhs ) = default; + VideoSessionKHR & operator=( VideoSessionKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + VideoSessionKHR( VideoSessionKHR && rhs ) = default; + VideoSessionKHR & operator=( VideoSessionKHR && rhs ) = default; +#else + VideoSessionKHR( VideoSessionKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_videoSessionKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSessionKHR, {} ) ) {} + + VideoSessionKHR & operator=( VideoSessionKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_videoSessionKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSessionKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR VideoSessionKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -6997,11 +7503,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::VideoSessionKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::VideoSessionKHR; }; +#endif template <> 
struct isVulkanHandleType @@ -7020,7 +7528,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR DeferredOperationKHR() = default; + DeferredOperationKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DeferredOperationKHR( DeferredOperationKHR const & rhs ) = default; + DeferredOperationKHR & operator=( DeferredOperationKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DeferredOperationKHR( DeferredOperationKHR && rhs ) = default; + DeferredOperationKHR & operator=( DeferredOperationKHR && rhs ) = default; +#else + DeferredOperationKHR( DeferredOperationKHR && rhs ) VULKAN_HPP_NOEXCEPT + : m_deferredOperationKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_deferredOperationKHR, {} ) ) + { + } + + DeferredOperationKHR & operator=( DeferredOperationKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_deferredOperationKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_deferredOperationKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DeferredOperationKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7087,11 +7613,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; }; +#endif template <> struct isVulkanHandleType @@ -7111,7 +7639,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBufferCollectionFUCHSIA; public: - VULKAN_HPP_CONSTEXPR BufferCollectionFUCHSIA() = default; + BufferCollectionFUCHSIA() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + BufferCollectionFUCHSIA( BufferCollectionFUCHSIA const & rhs ) = default; + BufferCollectionFUCHSIA & operator=( BufferCollectionFUCHSIA const & rhs ) = default; + +# if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + BufferCollectionFUCHSIA( BufferCollectionFUCHSIA && rhs ) = default; + BufferCollectionFUCHSIA & operator=( BufferCollectionFUCHSIA && rhs ) = default; +# else + BufferCollectionFUCHSIA( BufferCollectionFUCHSIA && rhs ) VULKAN_HPP_NOEXCEPT + : m_bufferCollectionFUCHSIA( VULKAN_HPP_NAMESPACE::exchange( rhs.m_bufferCollectionFUCHSIA, {} ) ) + { + } + + BufferCollectionFUCHSIA & operator=( BufferCollectionFUCHSIA && rhs ) VULKAN_HPP_NOEXCEPT + { + m_bufferCollectionFUCHSIA = VULKAN_HPP_NAMESPACE::exchange( rhs.m_bufferCollectionFUCHSIA, {} ); + return *this; + } +# endif VULKAN_HPP_CONSTEXPR BufferCollectionFUCHSIA( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7184,11 +7730,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA; }; +# if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA; }; +# endif template <> struct isVulkanHandleType @@ -7208,7 +7756,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBufferView; public: - VULKAN_HPP_CONSTEXPR BufferView() = default; + BufferView() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + BufferView( BufferView const & rhs ) = default; + BufferView & operator=( BufferView const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + BufferView( BufferView && rhs ) = default; + BufferView & operator=( BufferView && rhs ) = default; +#else + BufferView( BufferView && rhs ) VULKAN_HPP_NOEXCEPT : m_bufferView( VULKAN_HPP_NAMESPACE::exchange( rhs.m_bufferView, {} ) ) {} + 
+ BufferView & operator=( BufferView && rhs ) VULKAN_HPP_NOEXCEPT + { + m_bufferView = VULKAN_HPP_NAMESPACE::exchange( rhs.m_bufferView, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR BufferView( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7278,11 +7841,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::BufferView; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::BufferView; }; +#endif template <> struct isVulkanHandleType @@ -7301,7 +7866,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandPool; public: - VULKAN_HPP_CONSTEXPR CommandPool() = default; + CommandPool() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CommandPool( CommandPool const & rhs ) = default; + CommandPool & operator=( CommandPool const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + CommandPool( CommandPool && rhs ) = default; + CommandPool & operator=( CommandPool && rhs ) = default; +#else + CommandPool( CommandPool && rhs ) VULKAN_HPP_NOEXCEPT : m_commandPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) {} + + CommandPool & operator=( CommandPool && rhs ) VULKAN_HPP_NOEXCEPT + { + m_commandPool = VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandPool, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR CommandPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7371,11 +7951,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CommandPool; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CommandPool; }; +#endif template <> struct isVulkanHandleType @@ -7394,7 +7976,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineCache; public: - VULKAN_HPP_CONSTEXPR PipelineCache() = default; + PipelineCache() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + PipelineCache( PipelineCache const & rhs ) = default; + PipelineCache & operator=( PipelineCache const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + PipelineCache( PipelineCache && rhs ) = default; + PipelineCache & operator=( PipelineCache && rhs ) = default; +#else + PipelineCache( PipelineCache && rhs ) VULKAN_HPP_NOEXCEPT : m_pipelineCache( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineCache, {} ) ) {} + + PipelineCache & operator=( PipelineCache && rhs ) VULKAN_HPP_NOEXCEPT + { + m_pipelineCache = VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineCache, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR PipelineCache( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7464,11 +8061,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::PipelineCache; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::PipelineCache; }; +#endif template <> struct isVulkanHandleType @@ -7487,7 +8086,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCuFunctionNVX; public: - VULKAN_HPP_CONSTEXPR CuFunctionNVX() = default; + CuFunctionNVX() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CuFunctionNVX( CuFunctionNVX const & rhs ) = default; + CuFunctionNVX & operator=( CuFunctionNVX const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + CuFunctionNVX( CuFunctionNVX && rhs ) = default; + CuFunctionNVX & operator=( CuFunctionNVX && rhs ) = default; +#else + CuFunctionNVX( CuFunctionNVX && 
rhs ) VULKAN_HPP_NOEXCEPT : m_cuFunctionNVX( VULKAN_HPP_NAMESPACE::exchange( rhs.m_cuFunctionNVX, {} ) ) {} + + CuFunctionNVX & operator=( CuFunctionNVX && rhs ) VULKAN_HPP_NOEXCEPT + { + m_cuFunctionNVX = VULKAN_HPP_NAMESPACE::exchange( rhs.m_cuFunctionNVX, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR CuFunctionNVX( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7557,11 +8171,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CuFunctionNVX; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CuFunctionNVX; }; +#endif template <> struct isVulkanHandleType @@ -7580,7 +8196,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCuModuleNVX; public: - VULKAN_HPP_CONSTEXPR CuModuleNVX() = default; + CuModuleNVX() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CuModuleNVX( CuModuleNVX const & rhs ) = default; + CuModuleNVX & operator=( CuModuleNVX const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + CuModuleNVX( CuModuleNVX && rhs ) = default; + CuModuleNVX & operator=( CuModuleNVX && rhs ) = default; +#else + CuModuleNVX( CuModuleNVX && rhs ) VULKAN_HPP_NOEXCEPT : m_cuModuleNVX( VULKAN_HPP_NAMESPACE::exchange( rhs.m_cuModuleNVX, {} ) ) {} + + CuModuleNVX & operator=( CuModuleNVX && rhs ) VULKAN_HPP_NOEXCEPT + { + m_cuModuleNVX = VULKAN_HPP_NAMESPACE::exchange( rhs.m_cuModuleNVX, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR CuModuleNVX( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7650,11 +8281,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CuModuleNVX; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CuModuleNVX; }; +#endif template <> struct isVulkanHandleType @@ -7674,7 +8307,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCudaFunctionNV; public: - VULKAN_HPP_CONSTEXPR CudaFunctionNV() = default; + CudaFunctionNV() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CudaFunctionNV( CudaFunctionNV const & rhs ) = default; + CudaFunctionNV & operator=( CudaFunctionNV const & rhs ) = default; + +# if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + CudaFunctionNV( CudaFunctionNV && rhs ) = default; + CudaFunctionNV & operator=( CudaFunctionNV && rhs ) = default; +# else + CudaFunctionNV( CudaFunctionNV && rhs ) VULKAN_HPP_NOEXCEPT : m_cudaFunctionNV( VULKAN_HPP_NAMESPACE::exchange( rhs.m_cudaFunctionNV, {} ) ) {} + + CudaFunctionNV & operator=( CudaFunctionNV && rhs ) VULKAN_HPP_NOEXCEPT + { + m_cudaFunctionNV = VULKAN_HPP_NAMESPACE::exchange( rhs.m_cudaFunctionNV, {} ); + return *this; + } +# endif VULKAN_HPP_CONSTEXPR CudaFunctionNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7744,11 +8392,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CudaFunctionNV; }; +# if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CudaFunctionNV; }; +# endif template <> struct isVulkanHandleType @@ -7769,7 +8419,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCudaModuleNV; public: - VULKAN_HPP_CONSTEXPR CudaModuleNV() = default; + CudaModuleNV() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + CudaModuleNV( CudaModuleNV const & rhs ) = default; + CudaModuleNV & operator=( CudaModuleNV const & rhs ) = default; + +# if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + 
CudaModuleNV( CudaModuleNV && rhs ) = default; + CudaModuleNV & operator=( CudaModuleNV && rhs ) = default; +# else + CudaModuleNV( CudaModuleNV && rhs ) VULKAN_HPP_NOEXCEPT : m_cudaModuleNV( VULKAN_HPP_NAMESPACE::exchange( rhs.m_cudaModuleNV, {} ) ) {} + + CudaModuleNV & operator=( CudaModuleNV && rhs ) VULKAN_HPP_NOEXCEPT + { + m_cudaModuleNV = VULKAN_HPP_NAMESPACE::exchange( rhs.m_cudaModuleNV, {} ); + return *this; + } +# endif VULKAN_HPP_CONSTEXPR CudaModuleNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7839,11 +8504,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::CudaModuleNV; }; +# if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::CudaModuleNV; }; +# endif template <> struct isVulkanHandleType @@ -7863,7 +8530,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorPool; public: - VULKAN_HPP_CONSTEXPR DescriptorPool() = default; + DescriptorPool() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DescriptorPool( DescriptorPool const & rhs ) = default; + DescriptorPool & operator=( DescriptorPool const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DescriptorPool( DescriptorPool && rhs ) = default; + DescriptorPool & operator=( DescriptorPool && rhs ) = default; +#else + DescriptorPool( DescriptorPool && rhs ) VULKAN_HPP_NOEXCEPT : m_descriptorPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) {} + + DescriptorPool & operator=( DescriptorPool && rhs ) VULKAN_HPP_NOEXCEPT + { + m_descriptorPool = VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorPool, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DescriptorPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -7933,11 +8615,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DescriptorPool; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DescriptorPool; }; +#endif template <> struct isVulkanHandleType @@ -7956,7 +8640,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSetLayout; public: - VULKAN_HPP_CONSTEXPR DescriptorSetLayout() = default; + DescriptorSetLayout() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DescriptorSetLayout( DescriptorSetLayout const & rhs ) = default; + DescriptorSetLayout & operator=( DescriptorSetLayout const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DescriptorSetLayout( DescriptorSetLayout && rhs ) = default; + DescriptorSetLayout & operator=( DescriptorSetLayout && rhs ) = default; +#else + DescriptorSetLayout( DescriptorSetLayout && rhs ) VULKAN_HPP_NOEXCEPT + : m_descriptorSetLayout( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ) ) + { + } + + DescriptorSetLayout & operator=( DescriptorSetLayout && rhs ) VULKAN_HPP_NOEXCEPT + { + m_descriptorSetLayout = VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DescriptorSetLayout( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8029,11 +8731,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DescriptorSetLayout; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DescriptorSetLayout; }; +#endif template <> struct isVulkanHandleType @@ -8052,7 +8756,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFramebuffer; 
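A note on the `+#if ( VK_USE_64_BIT_PTR_DEFINES == 1 )` / `+#endif` pairs this diff wraps around the second CppType specialization of every handle: that specialization is keyed on the raw C handle type, which is only a distinct type per handle when handles are pointers. A sketch paraphrasing the vulkan_core.h definitions (for illustration, not code to add to a project):

    #include <cstdint>

    #if ( VK_USE_64_BIT_PTR_DEFINES == 1 )
    // 64-bit pointers: each non-dispatchable handle is a distinct opaque pointer type
    #  define VK_DEFINE_NON_DISPATCHABLE_HANDLE( object ) typedef struct object##_T * object;
    #else
    // 32-bit pointers: every non-dispatchable handle is the very same uint64_t typedef
    #  define VK_DEFINE_NON_DISPATCHABLE_HANDLE( object ) typedef uint64_t object;
    #endif

    VK_DEFINE_NON_DISPATCHABLE_HANDLE( VkFramebuffer )
    VK_DEFINE_NON_DISPATCHABLE_HANDLE( VkBufferView )
    // In the 32-bit case VkFramebuffer and VkBufferView are both uint64_t, so
    // specializing CppType for each of them would define the same template
    // twice; the added guards simply drop those specializations there.

(The guard is applied uniformly, including to dispatchable handles such as VkDevice that are always pointers; presumably the generator does not special-case them.)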
   public:
-    VULKAN_HPP_CONSTEXPR Framebuffer() = default;
+    Framebuffer() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue
+    Framebuffer( Framebuffer const & rhs )             = default;
+    Framebuffer & operator=( Framebuffer const & rhs ) = default;
+
+#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE )
+    Framebuffer( Framebuffer && rhs )             = default;
+    Framebuffer & operator=( Framebuffer && rhs ) = default;
+#else
+    Framebuffer( Framebuffer && rhs ) VULKAN_HPP_NOEXCEPT : m_framebuffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_framebuffer, {} ) ) {}
+
+    Framebuffer & operator=( Framebuffer && rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      m_framebuffer = VULKAN_HPP_NAMESPACE::exchange( rhs.m_framebuffer, {} );
+      return *this;
+    }
+#endif

     VULKAN_HPP_CONSTEXPR Framebuffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {}

@@ -8122,11 +8841,13 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = VULKAN_HPP_NAMESPACE::Framebuffer;
   };

+#if ( VK_USE_64_BIT_PTR_DEFINES == 1 )
   template <>
   struct CppType<VkFramebuffer>
   {
     using Type = VULKAN_HPP_NAMESPACE::Framebuffer;
   };
+#endif

   template <>
   struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::Framebuffer>
@@ -8145,7 +8866,25 @@ namespace VULKAN_HPP_NAMESPACE
       VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown;

   public:
-    VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNV() = default;
+    IndirectCommandsLayoutNV() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue
+    IndirectCommandsLayoutNV( IndirectCommandsLayoutNV const & rhs )             = default;
+    IndirectCommandsLayoutNV & operator=( IndirectCommandsLayoutNV const & rhs ) = default;
+
+#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE )
+    IndirectCommandsLayoutNV( IndirectCommandsLayoutNV && rhs )             = default;
+    IndirectCommandsLayoutNV & operator=( IndirectCommandsLayoutNV && rhs ) = default;
+#else
+    IndirectCommandsLayoutNV( IndirectCommandsLayoutNV && rhs ) VULKAN_HPP_NOEXCEPT
+      : m_indirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::exchange( rhs.m_indirectCommandsLayoutNV, {} ) )
+    {
+    }
+
+    IndirectCommandsLayoutNV & operator=( IndirectCommandsLayoutNV && rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      m_indirectCommandsLayoutNV = VULKAN_HPP_NAMESPACE::exchange( rhs.m_indirectCommandsLayoutNV, {} );
+      return *this;
+    }
+#endif

     VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {}

@@ -8212,11 +8951,13 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV;
   };

+#if ( VK_USE_64_BIT_PTR_DEFINES == 1 )
   template <>
   struct CppType<VkIndirectCommandsLayoutNV>
   {
     using Type = VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV;
   };
+#endif

   template <>
   struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>
@@ -8235,7 +8976,22 @@ namespace VULKAN_HPP_NAMESPACE
       VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown;

   public:
-    VULKAN_HPP_CONSTEXPR PrivateDataSlot() = default;
+    PrivateDataSlot() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue
+    PrivateDataSlot( PrivateDataSlot const & rhs )             = default;
+    PrivateDataSlot & operator=( PrivateDataSlot const & rhs ) = default;
+
+#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE )
+    PrivateDataSlot( PrivateDataSlot && rhs )             = default;
+    PrivateDataSlot & operator=( PrivateDataSlot && rhs ) = default;
+#else
+    PrivateDataSlot( PrivateDataSlot && rhs ) VULKAN_HPP_NOEXCEPT : m_privateDataSlot( VULKAN_HPP_NAMESPACE::exchange( rhs.m_privateDataSlot, {} ) ) {}
+
+    PrivateDataSlot & operator=( PrivateDataSlot && rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      m_privateDataSlot = VULKAN_HPP_NAMESPACE::exchange( rhs.m_privateDataSlot, {} );
+      return *this;
+    }
+#endif

     VULKAN_HPP_CONSTEXPR PrivateDataSlot( std::nullptr_t ) VULKAN_HPP_NOEXCEPT
{} @@ -8299,11 +9055,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::PrivateDataSlot; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::PrivateDataSlot; }; +#endif template <> struct isVulkanHandleType @@ -8324,7 +9082,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eRenderPass; public: - VULKAN_HPP_CONSTEXPR RenderPass() = default; + RenderPass() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + RenderPass( RenderPass const & rhs ) = default; + RenderPass & operator=( RenderPass const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + RenderPass( RenderPass && rhs ) = default; + RenderPass & operator=( RenderPass && rhs ) = default; +#else + RenderPass( RenderPass && rhs ) VULKAN_HPP_NOEXCEPT : m_renderPass( VULKAN_HPP_NAMESPACE::exchange( rhs.m_renderPass, {} ) ) {} + + RenderPass & operator=( RenderPass && rhs ) VULKAN_HPP_NOEXCEPT + { + m_renderPass = VULKAN_HPP_NAMESPACE::exchange( rhs.m_renderPass, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR RenderPass( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8394,11 +9167,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::RenderPass; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::RenderPass; }; +#endif template <> struct isVulkanHandleType @@ -8417,7 +9192,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSampler; public: - VULKAN_HPP_CONSTEXPR Sampler() = default; + Sampler() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Sampler( Sampler const & rhs ) = default; + Sampler & operator=( Sampler const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Sampler( Sampler && rhs ) = default; + Sampler & operator=( Sampler && rhs ) = default; +#else + Sampler( Sampler && rhs ) VULKAN_HPP_NOEXCEPT : m_sampler( VULKAN_HPP_NAMESPACE::exchange( rhs.m_sampler, {} ) ) {} + + Sampler & operator=( Sampler && rhs ) VULKAN_HPP_NOEXCEPT + { + m_sampler = VULKAN_HPP_NAMESPACE::exchange( rhs.m_sampler, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Sampler( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8487,11 +9277,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Sampler; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Sampler; }; +#endif template <> struct isVulkanHandleType @@ -8510,7 +9302,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSamplerYcbcrConversion; public: - VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion() = default; + SamplerYcbcrConversion() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + SamplerYcbcrConversion( SamplerYcbcrConversion const & rhs ) = default; + SamplerYcbcrConversion & operator=( SamplerYcbcrConversion const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + SamplerYcbcrConversion( SamplerYcbcrConversion && rhs ) = default; + SamplerYcbcrConversion & operator=( SamplerYcbcrConversion && rhs ) = default; +#else + SamplerYcbcrConversion( SamplerYcbcrConversion && rhs ) VULKAN_HPP_NOEXCEPT + : m_samplerYcbcrConversion( VULKAN_HPP_NAMESPACE::exchange( rhs.m_samplerYcbcrConversion, {} ) ) + { + } + + SamplerYcbcrConversion & operator=( SamplerYcbcrConversion && rhs ) VULKAN_HPP_NOEXCEPT + { + m_samplerYcbcrConversion = 
VULKAN_HPP_NAMESPACE::exchange( rhs.m_samplerYcbcrConversion, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8583,11 +9393,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion; }; +#endif template <> struct isVulkanHandleType @@ -8608,7 +9420,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eShaderModule; public: - VULKAN_HPP_CONSTEXPR ShaderModule() = default; + ShaderModule() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + ShaderModule( ShaderModule const & rhs ) = default; + ShaderModule & operator=( ShaderModule const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + ShaderModule( ShaderModule && rhs ) = default; + ShaderModule & operator=( ShaderModule && rhs ) = default; +#else + ShaderModule( ShaderModule && rhs ) VULKAN_HPP_NOEXCEPT : m_shaderModule( VULKAN_HPP_NAMESPACE::exchange( rhs.m_shaderModule, {} ) ) {} + + ShaderModule & operator=( ShaderModule && rhs ) VULKAN_HPP_NOEXCEPT + { + m_shaderModule = VULKAN_HPP_NAMESPACE::exchange( rhs.m_shaderModule, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR ShaderModule( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8678,11 +9505,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::ShaderModule; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::ShaderModule; }; +#endif template <> struct isVulkanHandleType @@ -8701,7 +9530,24 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eValidationCacheEXT; public: - VULKAN_HPP_CONSTEXPR ValidationCacheEXT() = default; + ValidationCacheEXT() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + ValidationCacheEXT( ValidationCacheEXT const & rhs ) = default; + ValidationCacheEXT & operator=( ValidationCacheEXT const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + ValidationCacheEXT( ValidationCacheEXT && rhs ) = default; + ValidationCacheEXT & operator=( ValidationCacheEXT && rhs ) = default; +#else + ValidationCacheEXT( ValidationCacheEXT && rhs ) VULKAN_HPP_NOEXCEPT : m_validationCacheEXT( VULKAN_HPP_NAMESPACE::exchange( rhs.m_validationCacheEXT, {} ) ) + { + } + + ValidationCacheEXT & operator=( ValidationCacheEXT && rhs ) VULKAN_HPP_NOEXCEPT + { + m_validationCacheEXT = VULKAN_HPP_NAMESPACE::exchange( rhs.m_validationCacheEXT, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR ValidationCacheEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8773,11 +9619,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::ValidationCacheEXT; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::ValidationCacheEXT; }; +#endif template <> struct isVulkanHandleType @@ -8796,7 +9644,25 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; public: - VULKAN_HPP_CONSTEXPR VideoSessionParametersKHR() = default; + VideoSessionParametersKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + VideoSessionParametersKHR( VideoSessionParametersKHR const & rhs ) = default; + VideoSessionParametersKHR & operator=( VideoSessionParametersKHR const & rhs ) = default; + +#if !defined( 
VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + VideoSessionParametersKHR( VideoSessionParametersKHR && rhs ) = default; + VideoSessionParametersKHR & operator=( VideoSessionParametersKHR && rhs ) = default; +#else + VideoSessionParametersKHR( VideoSessionParametersKHR && rhs ) VULKAN_HPP_NOEXCEPT + : m_videoSessionParametersKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSessionParametersKHR, {} ) ) + { + } + + VideoSessionParametersKHR & operator=( VideoSessionParametersKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_videoSessionParametersKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSessionParametersKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR VideoSessionParametersKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -8863,11 +9729,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR; }; +#endif template <> struct isVulkanHandleType @@ -8875,6 +9743,110 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; }; + class PipelineBinaryKHR + { + public: + using CType = VkPipelineBinaryKHR; + using NativeType = VkPipelineBinaryKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePipelineBinaryKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + PipelineBinaryKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + PipelineBinaryKHR( PipelineBinaryKHR const & rhs ) = default; + PipelineBinaryKHR & operator=( PipelineBinaryKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + PipelineBinaryKHR( PipelineBinaryKHR && rhs ) = default; + PipelineBinaryKHR & operator=( PipelineBinaryKHR && rhs ) = default; +#else + PipelineBinaryKHR( PipelineBinaryKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_pipelineBinaryKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineBinaryKHR, {} ) ) {} + + PipelineBinaryKHR & operator=( PipelineBinaryKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_pipelineBinaryKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineBinaryKHR, {} ); + return *this; + } +#endif + + VULKAN_HPP_CONSTEXPR PipelineBinaryKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PipelineBinaryKHR( VkPipelineBinaryKHR pipelineBinaryKHR ) VULKAN_HPP_NOEXCEPT : m_pipelineBinaryKHR( pipelineBinaryKHR ) {} + +#if ( VULKAN_HPP_TYPESAFE_CONVERSION == 1 ) + PipelineBinaryKHR & operator=( VkPipelineBinaryKHR pipelineBinaryKHR ) VULKAN_HPP_NOEXCEPT + { + m_pipelineBinaryKHR = pipelineBinaryKHR; + return *this; + } +#endif + + PipelineBinaryKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_pipelineBinaryKHR = {}; + return *this; + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryKHR const & ) const = default; +#else + bool operator==( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR == rhs.m_pipelineBinaryKHR; + } + + bool operator!=( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR != rhs.m_pipelineBinaryKHR; + } + + bool operator<( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR < rhs.m_pipelineBinaryKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator 
VkPipelineBinaryKHR() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineBinaryKHR == VK_NULL_HANDLE; + } + + private: + VkPipelineBinaryKHR m_pipelineBinaryKHR = {}; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PipelineBinaryKHR; + }; + +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PipelineBinaryKHR; + }; +#endif + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Queue { public: @@ -8886,7 +9858,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueue; public: - VULKAN_HPP_CONSTEXPR Queue() = default; + Queue() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Queue( Queue const & rhs ) = default; + Queue & operator=( Queue const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Queue( Queue && rhs ) = default; + Queue & operator=( Queue && rhs ) = default; +#else + Queue( Queue && rhs ) VULKAN_HPP_NOEXCEPT : m_queue( VULKAN_HPP_NAMESPACE::exchange( rhs.m_queue, {} ) ) {} + + Queue & operator=( Queue && rhs ) VULKAN_HPP_NOEXCEPT + { + m_queue = VULKAN_HPP_NAMESPACE::exchange( rhs.m_queue, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Queue( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -9036,8 +10023,8 @@ namespace VULKAN_HPP_NAMESPACE #else template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ //=== VK_KHR_synchronization2 === @@ -9114,11 +10101,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Queue; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Queue; }; +#endif template <> struct isVulkanHandleType @@ -9137,7 +10126,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDevice; public: - VULKAN_HPP_CONSTEXPR Device() = default; + Device() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Device( Device const & rhs ) = default; + Device & operator=( Device const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Device( Device && rhs ) = default; + Device & operator=( Device && rhs ) = default; +#else + Device( Device && rhs ) VULKAN_HPP_NOEXCEPT : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) {} + + Device & operator=( Device && rhs ) VULKAN_HPP_NOEXCEPT + { + m_device = VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Device( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -13455,12 +14459,6 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NODISCARD Result getFaultInfoEXT( VULKAN_HPP_NAMESPACE::DeviceFaultCountsEXT * pFaultCounts, VULKAN_HPP_NAMESPACE::DeviceFaultInfoEXT * pFaultInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; -#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - 
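The new PipelineBinaryKHR handle above pairs with the VK_KHR_pipeline_binary entry points declared below inside the Device class diff (createPipelineBinariesKHR, getPipelineKeyKHR, getPipelineBinaryDataKHR, destroyPipelineBinaryKHR, releaseCapturedPipelineDataKHR). A hedged enhanced-mode sketch of the capture side, assuming exceptions are enabled, `device` and `pipeline` are valid, and the pipeline was created with the extension's capture-data flag:

    #include <cstdint>
    #include <vector>
    #include <vulkan/vulkan.hpp>

    // Illustrative helper: extract the binary blobs backing an existing pipeline.
    std::vector<std::vector<uint8_t>> captureBinaries( vk::Device device, vk::Pipeline pipeline )
    {
      vk::PipelineBinaryCreateInfoKHR createInfo{};
      createInfo.pipeline = pipeline;  // capture from an already-created pipeline

      // returns ResultValue<std::vector<vk::PipelineBinaryKHR>>
      std::vector<vk::PipelineBinaryKHR> binaries = device.createPipelineBinariesKHR( createInfo ).value;

      std::vector<std::vector<uint8_t>> blobs;
      for ( vk::PipelineBinaryKHR binary : binaries )
      {
        vk::PipelineBinaryDataInfoKHR dataInfo{};
        dataInfo.pipelineBinary = binary;
        // yields a (PipelineBinaryKeyKHR, std::vector<uint8_t>) pair; the key
        // identifies the blob when the pipeline is rebuilt later
        auto keyAndData = device.getPipelineBinaryDataKHR( dataInfo );
        blobs.push_back( keyAndData.second );
        device.destroyPipelineBinaryKHR( binary );
      }
      return blobs;
    }

The persisted key/data pairs can later be fed back through PipelineBinaryKeysAndDataKHR to recreate the pipeline without recompiling shaders.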
template - VULKAN_HPP_NODISCARD typename ResultValueType>::type - getFaultInfoEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; -#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ - #if defined( VK_USE_PLATFORM_FUCHSIA ) //=== VK_FUCHSIA_external_memory === @@ -13963,11 +14961,11 @@ namespace VULKAN_HPP_NAMESPACE #else template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - bindOpticalFlowSessionImageNV( VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV session, - VULKAN_HPP_NAMESPACE::OpticalFlowSessionBindingPointNV bindingPoint, - VULKAN_HPP_NAMESPACE::ImageView view, - VULKAN_HPP_NAMESPACE::ImageLayout layout, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + bindOpticalFlowSessionImageNV( VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV session, + VULKAN_HPP_NAMESPACE::OpticalFlowSessionBindingPointNV bindingPoint, + VULKAN_HPP_NAMESPACE::ImageView view, + VULKAN_HPP_NAMESPACE::ImageLayout layout, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ //=== VK_KHR_maintenance5 === @@ -14016,6 +15014,17 @@ namespace VULKAN_HPP_NAMESPACE Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_AMD_anti_lag === + + template + void antiLagUpdateAMD( const VULKAN_HPP_NAMESPACE::AntiLagDataAMD * pData, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void antiLagUpdateAMD( const VULKAN_HPP_NAMESPACE::AntiLagDataAMD & data, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_EXT_shader_object === template @@ -14105,6 +15114,113 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ShaderEXT shader, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_KHR_pipeline_binary === + + template + VULKAN_HPP_NODISCARD Result createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR * pCreateInfo, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR * pBinaries, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , + typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> + createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template < + typename PipelineBinaryKHRAllocator = std::allocator, + typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, + typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue> + createPipelineBinariesKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + PipelineBinaryKHRAllocator & pipelineBinaryKHRAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD ResultValue, PipelineBinaryKHRAllocator>> + createPipelineBinariesKHRUnique( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional 
allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, + typename std::enable_if< + std::is_same>::value, + int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue, PipelineBinaryKHRAllocator>> + createPipelineBinariesKHRUnique( const VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR & createInfo, + Optional allocator, + PipelineBinaryKHRAllocator & pipelineBinaryKHRAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /* VULKAN_HPP_NO_SMART_HANDLE */ +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void destroyPipelineBinaryKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipelineBinaryKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, + Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary, + Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result getPipelineKeyKHR( const VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR * pPipelineCreateInfo, + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineKey, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD typename ResultValueType::type + getPipelineKeyKHR( Optional pipelineCreateInfo VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR * pInfo, + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineBinaryKey, + size_t * pPipelineBinaryDataSize, + void * pPipelineBinaryData, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>>::type + getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , + typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, + typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>>::type + getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info, + Uint8_tAllocator & uint8_tAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE 
*/ + + template + Result releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR * pInfo, + const VULKAN_HPP_NAMESPACE::AllocationCallbacks * pAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR & info, + Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_QCOM_tile_properties === template @@ -14178,10 +15294,19 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV * pLatencyMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE - template - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV - getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template , + typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector + getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template < + typename LatencyTimingsFrameReportNVAllocator = std::allocator, + typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, + typename std::enable_if::value, + int>::type = 0> + VULKAN_HPP_NODISCARD std::vector + getLatencyTimingsNV( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, + LatencyTimingsFrameReportNVAllocator & latencyTimingsFrameReportNVAllocator, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ #if defined( VK_USE_PLATFORM_SCREEN_QNX ) @@ -14258,11 +15383,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Device; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Device; }; +#endif template <> struct isVulkanHandleType @@ -14281,7 +15408,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayModeKHR; public: - VULKAN_HPP_CONSTEXPR DisplayModeKHR() = default; + DisplayModeKHR() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + DisplayModeKHR( DisplayModeKHR const & rhs ) = default; + DisplayModeKHR & operator=( DisplayModeKHR const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + DisplayModeKHR( DisplayModeKHR && rhs ) = default; + DisplayModeKHR & operator=( DisplayModeKHR && rhs ) = default; +#else + DisplayModeKHR( DisplayModeKHR && rhs ) VULKAN_HPP_NOEXCEPT : m_displayModeKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ) ) {} + + DisplayModeKHR & operator=( DisplayModeKHR && rhs ) VULKAN_HPP_NOEXCEPT + { + m_displayModeKHR = VULKAN_HPP_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR DisplayModeKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -14351,11 +15493,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::DisplayModeKHR; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::DisplayModeKHR; }; +#endif template <> struct isVulkanHandleType @@ -14374,7 +15518,22 @@ namespace 
VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePhysicalDevice; public: - VULKAN_HPP_CONSTEXPR PhysicalDevice() = default; + PhysicalDevice() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + PhysicalDevice( PhysicalDevice const & rhs ) = default; + PhysicalDevice & operator=( PhysicalDevice const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + PhysicalDevice( PhysicalDevice && rhs ) = default; + PhysicalDevice & operator=( PhysicalDevice && rhs ) = default; +#else + PhysicalDevice( PhysicalDevice && rhs ) VULKAN_HPP_NOEXCEPT : m_physicalDevice( VULKAN_HPP_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) {} + + PhysicalDevice & operator=( PhysicalDevice && rhs ) VULKAN_HPP_NOEXCEPT + { + m_physicalDevice = VULKAN_HPP_NAMESPACE::exchange( rhs.m_physicalDevice, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR PhysicalDevice( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -15586,7 +16745,7 @@ namespace VULKAN_HPP_NAMESPACE #else template typename ResultValueType::type - acquireDrmDisplayEXT( int32_t drmFd, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + acquireDrmDisplayEXT( int32_t drmFd, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ template @@ -15779,11 +16938,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::PhysicalDevice; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::PhysicalDevice; }; +#endif template <> struct isVulkanHandleType @@ -15802,7 +16963,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eInstance; public: - VULKAN_HPP_CONSTEXPR Instance() = default; + Instance() VULKAN_HPP_NOEXCEPT{}; // = default - try to workaround a compiler issue + Instance( Instance const & rhs ) = default; + Instance & operator=( Instance const & rhs ) = default; + +#if !defined( VULKAN_HPP_HANDLES_MOVE_EXCHANGE ) + Instance( Instance && rhs ) = default; + Instance & operator=( Instance && rhs ) = default; +#else + Instance( Instance && rhs ) VULKAN_HPP_NOEXCEPT : m_instance( VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ) ) {} + + Instance & operator=( Instance && rhs ) VULKAN_HPP_NOEXCEPT + { + m_instance = VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ); + return *this; + } +#endif VULKAN_HPP_CONSTEXPR Instance( std::nullptr_t ) VULKAN_HPP_NOEXCEPT {} @@ -16451,11 +17627,13 @@ namespace VULKAN_HPP_NAMESPACE using Type = VULKAN_HPP_NAMESPACE::Instance; }; +#if ( VK_USE_64_BIT_PTR_DEFINES == 1 ) template <> struct CppType { using Type = VULKAN_HPP_NAMESPACE::Instance; }; +#endif template <> struct isVulkanHandleType diff --git a/third_party/vulkan/vulkan_hash.hpp b/third_party/vulkan/vulkan_hash.hpp index 8adf257..1894033 100644 --- a/third_party/vulkan/vulkan_hash.hpp +++ b/third_party/vulkan/vulkan_hash.hpp @@ -525,6 +525,17 @@ namespace std } }; + //=== VK_KHR_pipeline_binary === + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR const & pipelineBinaryKHR ) const VULKAN_HPP_NOEXCEPT + { + return std::hash{}( static_cast( pipelineBinaryKHR ) ); + } + }; + #if 14 <= VULKAN_HPP_CPP_VERSION //====================================== //=== HASH structures for structures === @@ -1062,6 +1073,35 @@ namespace std }; # endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + template <> + struct hash + { + 
std::size_t operator()( VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD const & antiLagPresentationInfoAMD ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagPresentationInfoAMD.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagPresentationInfoAMD.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagPresentationInfoAMD.stage );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagPresentationInfoAMD.frameIndex );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::AntiLagDataAMD>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::AntiLagDataAMD const & antiLagDataAMD ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagDataAMD.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagDataAMD.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagDataAMD.mode );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagDataAMD.maxFPS );
+      VULKAN_HPP_HASH_COMBINE( seed, antiLagDataAMD.pPresentationInfo );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -3990,6 +4030,20 @@ namespace std
     }
   };
 
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR const & devicePipelineBinaryInternalCacheControlKHR ) const
+      VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, devicePipelineBinaryInternalCacheControlKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, devicePipelineBinaryInternalCacheControlKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, devicePipelineBinaryInternalCacheControlKHR.disableInternalCache );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -5637,6 +5691,19 @@ namespace std
   };
 # endif /*VK_USE_PLATFORM_IOS_MVK*/
 
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA const & imageAlignmentControlCreateInfoMESA ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, imageAlignmentControlCreateInfoMESA.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, imageAlignmentControlCreateInfoMESA.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, imageAlignmentControlCreateInfoMESA.maximumRequestedAlignment );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -7478,6 +7545,19 @@ namespace std
     }
   };
 
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceAntiLagFeaturesAMD>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceAntiLagFeaturesAMD const & physicalDeviceAntiLagFeaturesAMD ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceAntiLagFeaturesAMD.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceAntiLagFeaturesAMD.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceAntiLagFeaturesAMD.antiLag );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -7669,16 +7749,45 @@ namespace std
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesNV>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesNV const & physicalDeviceComputeShaderDerivativesFeaturesNV )
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV const & physicalDeviceCommandBufferInheritanceFeaturesNV )
       const VULKAN_HPP_NOEXCEPT
     {
       std::size_t seed = 0;
-      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesNV.sType );
-      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesNV.pNext );
-      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesNV.computeDerivativeGroupQuads );
-      VULKAN_HPP_HASH_COMBINE( seed,
physicalDeviceComputeShaderDerivativesFeaturesNV.computeDerivativeGroupLinear ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceCommandBufferInheritanceFeaturesNV.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceCommandBufferInheritanceFeaturesNV.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceCommandBufferInheritanceFeaturesNV.commandBufferInheritance ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & physicalDeviceComputeShaderDerivativesFeaturesKHR ) + const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesKHR.computeDerivativeGroupQuads ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesFeaturesKHR.computeDerivativeGroupLinear ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & physicalDeviceComputeShaderDerivativesPropertiesKHR ) const + VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesPropertiesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesPropertiesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceComputeShaderDerivativesPropertiesKHR.meshAndTaskShaderDerivatives ); return seed; } }; @@ -9116,6 +9225,34 @@ namespace std } }; + template <> + struct hash + { + std::size_t operator()( + VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA const & physicalDeviceImageAlignmentControlFeaturesMESA ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlFeaturesMESA.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlFeaturesMESA.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlFeaturesMESA.imageAlignmentControl ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA const & physicalDeviceImageAlignmentControlPropertiesMESA ) + const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlPropertiesMESA.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlPropertiesMESA.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceImageAlignmentControlPropertiesMESA.supportedImageAlignmentMask ); + return seed; + } + }; + template <> struct hash { @@ -9385,29 +9522,36 @@ namespace std }; template <> - struct hash + struct hash { std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredDriverPropertiesMSFT const & physicalDeviceLayeredDriverPropertiesMSFT ) const VULKAN_HPP_NOEXCEPT + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR const & physicalDeviceLayeredApiPropertiesKHR ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.underlyingAPI ); + 
VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.vendorID ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.deviceID ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.layeredAPI ); + for ( size_t i = 0; i < VK_MAX_PHYSICAL_DEVICE_NAME_SIZE; ++i ) + { + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesKHR.deviceName[i] ); + } return seed; } }; template <> - struct hash + struct hash { std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyDitheringFeaturesEXT const & physicalDeviceLegacyDitheringFeaturesEXT ) const VULKAN_HPP_NOEXCEPT + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesListKHR const & physicalDeviceLayeredApiPropertiesListKHR ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.legacyDithering ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesListKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesListKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesListKHR.layeredApiCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiPropertiesListKHR.pLayeredApis ); return seed; } }; @@ -9546,6 +9690,129 @@ namespace std } }; + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties const & physicalDeviceSparseProperties ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard2DBlockShape ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard2DMultisampleBlockShape ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard3DBlockShape ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyAlignedMipSize ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyNonResidentStrict ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties const & physicalDeviceProperties ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.apiVersion ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.driverVersion ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.vendorID ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceID ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceType ); + for ( size_t i = 0; i < VK_MAX_PHYSICAL_DEVICE_NAME_SIZE; ++i ) + { + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceName[i] ); + } + for ( size_t i = 0; i < VK_UUID_SIZE; ++i ) + { + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.pipelineCacheUUID[i] ); + } + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.limits ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.sparseProperties ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 const & physicalDeviceProperties2 ) const VULKAN_HPP_NOEXCEPT + { + 
std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.properties );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR const & physicalDeviceLayeredApiVulkanPropertiesKHR ) const
+      VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiVulkanPropertiesKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiVulkanPropertiesKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredApiVulkanPropertiesKHR.properties );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredDriverPropertiesMSFT>
+  {
+    std::size_t
+      operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredDriverPropertiesMSFT const & physicalDeviceLayeredDriverPropertiesMSFT ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLayeredDriverPropertiesMSFT.underlyingAPI );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyDitheringFeaturesEXT>
+  {
+    std::size_t
+      operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyDitheringFeaturesEXT const & physicalDeviceLegacyDitheringFeaturesEXT ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyDitheringFeaturesEXT.legacyDithering );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT>
+  {
+    std::size_t operator()(
+      VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & physicalDeviceLegacyVertexAttributesFeaturesEXT ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesFeaturesEXT.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesFeaturesEXT.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesFeaturesEXT.legacyVertexAttributes );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & physicalDeviceLegacyVertexAttributesPropertiesEXT )
+      const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesPropertiesEXT.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesPropertiesEXT.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLegacyVertexAttributesPropertiesEXT.nativeUnalignedPerformance );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -9696,6 +9963,41 @@ namespace std
     }
   };
 
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR>
+  {
+    std::size_t
+      operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR const & physicalDeviceMaintenance7FeaturesKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7FeaturesKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7FeaturesKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7FeaturesKHR.maintenance7 );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR>
+  {
+    std::size_t
+
operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR const & physicalDeviceMaintenance7PropertiesKHR ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.robustFragmentShadingRateAttachmentAccess ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.separateDepthStencilAttachmentAccess ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetTotalUniformBuffersDynamic ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetTotalStorageBuffersDynamic ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetTotalBuffersDynamic ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance7PropertiesKHR.maxDescriptorSetUpdateAfterBindTotalBuffersDynamic ); + return seed; + } + }; + template <> struct hash { @@ -10252,6 +10554,38 @@ namespace std } }; + template <> + struct hash + { + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR const & physicalDevicePipelineBinaryFeaturesKHR ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryFeaturesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryFeaturesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryFeaturesKHR.pipelineBinaries ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR const & physicalDevicePipelineBinaryPropertiesKHR ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pipelineBinaryInternalCache ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pipelineBinaryInternalCacheControl ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pipelineBinaryPrefersInternalCache ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pipelineBinaryPrecompiledInternalCache ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineBinaryPropertiesKHR.pipelineBinaryCompressedData ); + return seed; + } + }; + template <> struct hash { @@ -10498,59 +10832,6 @@ namespace std } }; - template <> - struct hash - { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties const & physicalDeviceSparseProperties ) const VULKAN_HPP_NOEXCEPT - { - std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard2DBlockShape ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard2DMultisampleBlockShape ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyStandard3DBlockShape ); - VULKAN_HPP_HASH_COMBINE( seed, 
physicalDeviceSparseProperties.residencyAlignedMipSize ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceSparseProperties.residencyNonResidentStrict ); - return seed; - } - }; - - template <> - struct hash - { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties const & physicalDeviceProperties ) const VULKAN_HPP_NOEXCEPT - { - std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.apiVersion ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.driverVersion ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.vendorID ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceID ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceType ); - for ( size_t i = 0; i < VK_MAX_PHYSICAL_DEVICE_NAME_SIZE; ++i ) - { - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.deviceName[i] ); - } - for ( size_t i = 0; i < VK_UUID_SIZE; ++i ) - { - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.pipelineCacheUUID[i] ); - } - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.limits ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties.sparseProperties ); - return seed; - } - }; - - template <> - struct hash - { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 const & physicalDeviceProperties2 ) const VULKAN_HPP_NOEXCEPT - { - std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceProperties2.properties ); - return seed; - } - }; - template <> struct hash { @@ -11516,6 +11797,35 @@ namespace std } }; + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & + physicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR.shaderRelaxedExtendedInstruction ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & physicalDeviceShaderReplicatedCompositesFeaturesEXT ) const + VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderReplicatedCompositesFeaturesEXT.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderReplicatedCompositesFeaturesEXT.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderReplicatedCompositesFeaturesEXT.shaderReplicatedComposites ); + return seed; + } + }; + template <> struct hash { @@ -12475,6 +12785,116 @@ namespace std } }; + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR const & pipelineBinaryKeyKHR ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeyKHR.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeyKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeyKHR.keySize ); + for ( size_t i = 0; i < VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR; ++i ) + { + VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeyKHR.key[i] ); + } + return seed; + } + }; + + template <> + struct hash + { + std::size_t 
operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR const & pipelineBinaryDataKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryDataKHR.dataSize );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryDataKHR.pData );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR const & pipelineBinaryKeysAndDataKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeysAndDataKHR.binaryCount );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeysAndDataKHR.pPipelineBinaryKeys );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryKeysAndDataKHR.pPipelineBinaryData );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR const & pipelineCreateInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateInfoKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateInfoKHR.pNext );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR const & pipelineBinaryCreateInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryCreateInfoKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryCreateInfoKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryCreateInfoKHR.pKeysAndDataInfo );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryCreateInfoKHR.pipeline );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryCreateInfoKHR.pPipelineCreateInfo );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR const & pipelineBinaryDataInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryDataInfoKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryDataInfoKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryDataInfoKHR.pipelineBinary );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR const & pipelineBinaryHandlesInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryHandlesInfoKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryHandlesInfoKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryHandlesInfoKHR.pipelineBinaryCount );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryHandlesInfoKHR.pPipelineBinaries );
+      return seed;
+    }
+  };
+
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR const & pipelineBinaryInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryInfoKHR.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryInfoKHR.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryInfoKHR.binaryCount );
+      VULKAN_HPP_HASH_COMBINE( seed, pipelineBinaryInfoKHR.pPipelineBinaries );
+      return seed;
+    }
+  };
+
   template <>
   struct hash
   {
@@ -13689,6 +14109,19 @@ namespace std
     }
   };
 
+  template <>
+  struct hash<VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR>
+  {
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR const & releaseCapturedPipelineDataInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    {
+      std::size_t seed = 0;
+      VULKAN_HPP_HASH_COMBINE( seed, releaseCapturedPipelineDataInfoKHR.sType );
+
VULKAN_HPP_HASH_COMBINE( seed, releaseCapturedPipelineDataInfoKHR.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, releaseCapturedPipelineDataInfoKHR.pipeline ); + return seed; + } + }; + template <> struct hash { diff --git a/third_party/vulkan/vulkan_metal.h b/third_party/vulkan/vulkan_metal.h index e6f7bf7..89a5574 100644 --- a/third_party/vulkan/vulkan_metal.h +++ b/third_party/vulkan/vulkan_metal.h @@ -52,28 +52,28 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT( #define VK_EXT_metal_objects 1 #ifdef __OBJC__ @protocol MTLDevice; -typedef id MTLDevice_id; +typedef __unsafe_unretained id MTLDevice_id; #else typedef void* MTLDevice_id; #endif #ifdef __OBJC__ @protocol MTLCommandQueue; -typedef id MTLCommandQueue_id; +typedef __unsafe_unretained id MTLCommandQueue_id; #else typedef void* MTLCommandQueue_id; #endif #ifdef __OBJC__ @protocol MTLBuffer; -typedef id MTLBuffer_id; +typedef __unsafe_unretained id MTLBuffer_id; #else typedef void* MTLBuffer_id; #endif #ifdef __OBJC__ @protocol MTLTexture; -typedef id MTLTexture_id; +typedef __unsafe_unretained id MTLTexture_id; #else typedef void* MTLTexture_id; #endif @@ -81,12 +81,12 @@ typedef void* MTLTexture_id; typedef struct __IOSurface* IOSurfaceRef; #ifdef __OBJC__ @protocol MTLSharedEvent; -typedef id MTLSharedEvent_id; +typedef __unsafe_unretained id MTLSharedEvent_id; #else typedef void* MTLSharedEvent_id; #endif -#define VK_EXT_METAL_OBJECTS_SPEC_VERSION 1 +#define VK_EXT_METAL_OBJECTS_SPEC_VERSION 2 #define VK_EXT_METAL_OBJECTS_EXTENSION_NAME "VK_EXT_metal_objects" typedef enum VkExportMetalObjectTypeFlagBitsEXT { diff --git a/third_party/vulkan/vulkan_raii.hpp b/third_party/vulkan/vulkan_raii.hpp index 0149b00..ac672bf 100644 --- a/third_party/vulkan/vulkan_raii.hpp +++ b/third_party/vulkan/vulkan_raii.hpp @@ -9,7 +9,7 @@ #define VULKAN_RAII_HPP #include // std::unique_ptr -#include // std::exchange, std::forward +#include // std::forward #include #if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) @@ -17,18 +17,6 @@ namespace VULKAN_HPP_NAMESPACE { namespace VULKAN_HPP_RAII_NAMESPACE { -# if ( 14 <= VULKAN_HPP_CPP_VERSION ) - using std::exchange; -# else - template - VULKAN_HPP_CONSTEXPR_14 VULKAN_HPP_INLINE T exchange( T & obj, U && newValue ) - { - T oldValue = std::move( obj ); - obj = std::forward( newValue ); - return oldValue; - } -# endif - template class CreateReturnType { @@ -1680,12 +1668,22 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayoutKHR" ) ); vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2KHR" ) ); + //=== VK_AMD_anti_lag === + vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetDeviceProcAddr( device, "vkAntiLagUpdateAMD" ) ); + //=== VK_EXT_shader_object === vkCreateShadersEXT = PFN_vkCreateShadersEXT( vkGetDeviceProcAddr( device, "vkCreateShadersEXT" ) ); vkDestroyShaderEXT = PFN_vkDestroyShaderEXT( vkGetDeviceProcAddr( device, "vkDestroyShaderEXT" ) ); vkGetShaderBinaryDataEXT = PFN_vkGetShaderBinaryDataEXT( vkGetDeviceProcAddr( device, "vkGetShaderBinaryDataEXT" ) ); vkCmdBindShadersEXT = PFN_vkCmdBindShadersEXT( vkGetDeviceProcAddr( device, "vkCmdBindShadersEXT" ) ); + //=== VK_KHR_pipeline_binary === + vkCreatePipelineBinariesKHR = PFN_vkCreatePipelineBinariesKHR( vkGetDeviceProcAddr( device, "vkCreatePipelineBinariesKHR" ) ); + vkDestroyPipelineBinaryKHR = PFN_vkDestroyPipelineBinaryKHR( vkGetDeviceProcAddr( device, 
"vkDestroyPipelineBinaryKHR" ) ); + vkGetPipelineKeyKHR = PFN_vkGetPipelineKeyKHR( vkGetDeviceProcAddr( device, "vkGetPipelineKeyKHR" ) ); + vkGetPipelineBinaryDataKHR = PFN_vkGetPipelineBinaryDataKHR( vkGetDeviceProcAddr( device, "vkGetPipelineBinaryDataKHR" ) ); + vkReleaseCapturedPipelineDataKHR = PFN_vkReleaseCapturedPipelineDataKHR( vkGetDeviceProcAddr( device, "vkReleaseCapturedPipelineDataKHR" ) ); + //=== VK_QCOM_tile_properties === vkGetFramebufferTilePropertiesQCOM = PFN_vkGetFramebufferTilePropertiesQCOM( vkGetDeviceProcAddr( device, "vkGetFramebufferTilePropertiesQCOM" ) ); vkGetDynamicRenderingTilePropertiesQCOM = @@ -2564,12 +2562,22 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR = 0; PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR = 0; + //=== VK_AMD_anti_lag === + PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD = 0; + //=== VK_EXT_shader_object === PFN_vkCreateShadersEXT vkCreateShadersEXT = 0; PFN_vkDestroyShaderEXT vkDestroyShaderEXT = 0; PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT = 0; PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT = 0; + //=== VK_KHR_pipeline_binary === + PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR = 0; + PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR = 0; + PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR = 0; + PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR = 0; + PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR = 0; + //=== VK_QCOM_tile_properties === PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM = 0; PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM = 0; @@ -2706,6 +2714,9 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_shader_object === class ShaderEXT; + //=== VK_KHR_pipeline_binary === + class PipelineBinaryKHR; + //==================== //=== RAII HANDLES === //==================== @@ -2827,8 +2838,8 @@ namespace VULKAN_HPP_NAMESPACE Instance( Instance const & ) = delete; Instance( Instance && rhs ) VULKAN_HPP_NOEXCEPT - : m_instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + : m_instance( VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) , m_dispatcher( rhs.m_dispatcher.release() ) { } @@ -2871,7 +2882,7 @@ namespace VULKAN_HPP_NAMESPACE { m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_instance, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_instance, nullptr ); } VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::InstanceDispatcher const * getDispatcher() const @@ -3096,8 +3107,8 @@ namespace VULKAN_HPP_NAMESPACE PhysicalDevice( PhysicalDevice const & rhs ) : m_physicalDevice( rhs.m_physicalDevice ), m_dispatcher( rhs.m_dispatcher ) {} PhysicalDevice( PhysicalDevice && rhs ) VULKAN_HPP_NOEXCEPT - : m_physicalDevice( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_physicalDevice( VULKAN_HPP_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -3137,7 +3148,7 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::PhysicalDevice release() { m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_physicalDevice, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_physicalDevice, nullptr ); } VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::InstanceDispatcher const * getDispatcher() const @@ -3565,8 +3576,8 @@ namespace VULKAN_HPP_NAMESPACE Device( Device const & ) = delete; Device( Device && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) , m_dispatcher( rhs.m_dispatcher.release() ) { } @@ -3609,7 +3620,7 @@ namespace VULKAN_HPP_NAMESPACE { m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_device, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_device, nullptr ); } VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeviceDispatcher const * getDispatcher() const @@ -4412,9 +4423,10 @@ namespace VULKAN_HPP_NAMESPACE getAccelerationStructureOpaqueCaptureDescriptorDataEXT( const VULKAN_HPP_NAMESPACE::AccelerationStructureCaptureDescriptorDataInfoEXT & info ) const; //=== VK_EXT_device_fault === - - VULKAN_HPP_NODISCARD std::pair getFaultInfoEXT() const; - + template + VULKAN_HPP_NODISCARD Result getFaultInfoEXT( VULKAN_HPP_NAMESPACE::DeviceFaultCountsEXT * pFaultCounts, + VULKAN_HPP_NAMESPACE::DeviceFaultInfoEXT * pFaultInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; # if defined( VK_USE_PLATFORM_FUCHSIA ) //=== VK_FUCHSIA_external_memory === @@ -4552,6 +4564,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info ) const VULKAN_HPP_NOEXCEPT; + //=== VK_AMD_anti_lag === + + void antiLagUpdateAMD( const VULKAN_HPP_NAMESPACE::AntiLagDataAMD & data ) const VULKAN_HPP_NOEXCEPT; + //=== VK_EXT_shader_object === VULKAN_HPP_NODISCARD @@ -4564,6 +4580,23 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) const VULKAN_HPP_RAII_CREATE_NOEXCEPT; + //=== VK_KHR_pipeline_binary === + + VULKAN_HPP_NODISCARD + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::CreateReturnType>::Type + createPipelineBinariesKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR getPipelineKeyKHR( + Optional pipelineCreateInfo VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const; + + VULKAN_HPP_NODISCARD std::pair> + getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info ) const; + + void releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR & info, + Optional allocator + VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + //=== VK_QCOM_tile_properties === VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::TilePropertiesQCOM @@ -4633,10 +4666,10 @@ namespace VULKAN_HPP_NAMESPACE AccelerationStructureKHR( AccelerationStructureKHR const & ) = delete; AccelerationStructureKHR( AccelerationStructureKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_accelerationStructure( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructure, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_accelerationStructure( VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructure, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -4683,7 +4716,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_accelerationStructure, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_accelerationStructure, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -4753,10 +4786,10 @@ namespace VULKAN_HPP_NAMESPACE AccelerationStructureNV( AccelerationStructureNV const & ) = delete; AccelerationStructureNV( AccelerationStructureNV && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_accelerationStructure( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_accelerationStructure, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_accelerationStructure( VULKAN_HPP_NAMESPACE::exchange( rhs.m_accelerationStructure, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -4803,7 +4836,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_accelerationStructure, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_accelerationStructure, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -4881,10 +4914,10 @@ namespace VULKAN_HPP_NAMESPACE Buffer( Buffer const & ) = delete; Buffer( Buffer && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_buffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_buffer, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_buffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_buffer, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -4930,7 +4963,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_buffer, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_buffer, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -5007,10 +5040,10 @@ namespace 
VULKAN_HPP_NAMESPACE BufferCollectionFUCHSIA( BufferCollectionFUCHSIA const & ) = delete; BufferCollectionFUCHSIA( BufferCollectionFUCHSIA && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_collection( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_collection, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_collection( VULKAN_HPP_NAMESPACE::exchange( rhs.m_collection, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -5057,7 +5090,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_collection, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_collection, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -5136,10 +5169,10 @@ namespace VULKAN_HPP_NAMESPACE BufferView( BufferView const & ) = delete; BufferView( BufferView && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_bufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_bufferView, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_bufferView( VULKAN_HPP_NAMESPACE::exchange( rhs.m_bufferView, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -5185,7 +5218,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_bufferView, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_bufferView, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -5255,10 +5288,10 @@ namespace VULKAN_HPP_NAMESPACE CommandPool( CommandPool const & ) = delete; CommandPool( CommandPool && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_commandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_commandPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -5304,7 +5337,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_commandPool, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_commandPool, nullptr ); } VULKAN_HPP_NAMESPACE::Device 
getDevice() const @@ -5372,10 +5405,10 @@ namespace VULKAN_HPP_NAMESPACE CommandBuffer( CommandBuffer const & ) = delete; CommandBuffer( CommandBuffer && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_commandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) - , m_commandBuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_commandBuffer, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_commandPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandPool, {} ) ) + , m_commandBuffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_commandBuffer, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -5421,7 +5454,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_commandPool = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_commandBuffer, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_commandBuffer, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -6036,7 +6069,8 @@ namespace VULKAN_HPP_NAMESPACE void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR & locationInfo ) const VULKAN_HPP_NOEXCEPT; - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & locationInfo ) const VULKAN_HPP_NOEXCEPT; + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo ) const + VULKAN_HPP_NOEXCEPT; //=== VK_EXT_line_rasterization === @@ -6456,10 +6490,10 @@ namespace VULKAN_HPP_NAMESPACE CuFunctionNVX( CuFunctionNVX const & ) = delete; CuFunctionNVX( CuFunctionNVX && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_function( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_function, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_function( VULKAN_HPP_NAMESPACE::exchange( rhs.m_function, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -6505,7 +6539,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_function, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_function, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -6575,10 +6609,10 @@ namespace VULKAN_HPP_NAMESPACE CuModuleNVX( CuModuleNVX const & ) = delete; CuModuleNVX( CuModuleNVX && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_module( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_module, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( 
VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_module( VULKAN_HPP_NAMESPACE::exchange( rhs.m_module, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -6624,7 +6658,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_module, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_module, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -6695,10 +6729,10 @@ namespace VULKAN_HPP_NAMESPACE CudaFunctionNV( CudaFunctionNV const & ) = delete; CudaFunctionNV( CudaFunctionNV && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_function( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_function, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_function( VULKAN_HPP_NAMESPACE::exchange( rhs.m_function, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -6744,7 +6778,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_function, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_function, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -6816,10 +6850,10 @@ namespace VULKAN_HPP_NAMESPACE CudaModuleNV( CudaModuleNV const & ) = delete; CudaModuleNV( CudaModuleNV && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_module( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_module, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_module( VULKAN_HPP_NAMESPACE::exchange( rhs.m_module, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -6865,7 +6899,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_module, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_module, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -6940,10 +6974,10 @@ namespace VULKAN_HPP_NAMESPACE DebugReportCallbackEXT( DebugReportCallbackEXT const & ) = delete; DebugReportCallbackEXT( DebugReportCallbackEXT && rhs ) VULKAN_HPP_NOEXCEPT - : m_instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ) ) - , m_callback( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_callback, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : 
m_instance( VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ) ) + , m_callback( VULKAN_HPP_NAMESPACE::exchange( rhs.m_callback, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -6990,7 +7024,7 @@ namespace VULKAN_HPP_NAMESPACE m_instance = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_callback, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_callback, nullptr ); } VULKAN_HPP_NAMESPACE::Instance getInstance() const @@ -7060,10 +7094,10 @@ namespace VULKAN_HPP_NAMESPACE DebugUtilsMessengerEXT( DebugUtilsMessengerEXT const & ) = delete; DebugUtilsMessengerEXT( DebugUtilsMessengerEXT && rhs ) VULKAN_HPP_NOEXCEPT - : m_instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ) ) - , m_messenger( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_messenger, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_instance( VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ) ) + , m_messenger( VULKAN_HPP_NAMESPACE::exchange( rhs.m_messenger, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7110,7 +7144,7 @@ namespace VULKAN_HPP_NAMESPACE m_instance = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_messenger, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_messenger, nullptr ); } VULKAN_HPP_NAMESPACE::Instance getInstance() const @@ -7179,10 +7213,10 @@ namespace VULKAN_HPP_NAMESPACE DeferredOperationKHR( DeferredOperationKHR const & ) = delete; DeferredOperationKHR( DeferredOperationKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_operation( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_operation, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_operation( VULKAN_HPP_NAMESPACE::exchange( rhs.m_operation, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7229,7 +7263,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_operation, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_operation, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7307,10 +7341,10 @@ namespace VULKAN_HPP_NAMESPACE DescriptorPool( DescriptorPool const & ) = delete; DescriptorPool( DescriptorPool && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_descriptorPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , 
m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_descriptorPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7357,7 +7391,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_descriptorPool, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_descriptorPool, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7417,10 +7451,10 @@ namespace VULKAN_HPP_NAMESPACE DescriptorSet( DescriptorSet const & ) = delete; DescriptorSet( DescriptorSet && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_descriptorPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) - , m_descriptorSet( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSet, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_descriptorPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorPool, {} ) ) + , m_descriptorSet( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSet, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7468,7 +7502,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_descriptorPool = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_descriptorSet, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_descriptorSet, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7578,10 +7612,10 @@ namespace VULKAN_HPP_NAMESPACE DescriptorSetLayout( DescriptorSetLayout const & ) = delete; DescriptorSetLayout( DescriptorSetLayout && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_descriptorSetLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_descriptorSetLayout( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorSetLayout, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7628,7 +7662,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_descriptorSetLayout, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_descriptorSetLayout, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7704,10 +7738,10 @@ namespace VULKAN_HPP_NAMESPACE DescriptorUpdateTemplate( DescriptorUpdateTemplate const & ) = delete; DescriptorUpdateTemplate( DescriptorUpdateTemplate && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , 
m_descriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_descriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::exchange( rhs.m_descriptorUpdateTemplate, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7754,7 +7788,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_descriptorUpdateTemplate, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_descriptorUpdateTemplate, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7824,10 +7858,10 @@ namespace VULKAN_HPP_NAMESPACE DeviceMemory( DeviceMemory const & ) = delete; DeviceMemory( DeviceMemory && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_memory( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_memory, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_memory( VULKAN_HPP_NAMESPACE::exchange( rhs.m_memory, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -7873,7 +7907,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_memory, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_memory, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -7974,9 +8008,9 @@ namespace VULKAN_HPP_NAMESPACE DisplayKHR( DisplayKHR const & ) = delete; DisplayKHR( DisplayKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_physicalDevice( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) - , m_display( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_display, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_physicalDevice( VULKAN_HPP_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) + , m_display( VULKAN_HPP_NAMESPACE::exchange( rhs.m_display, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8018,7 +8052,7 @@ namespace VULKAN_HPP_NAMESPACE { m_physicalDevice = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_display, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_display, nullptr ); } VULKAN_HPP_NAMESPACE::PhysicalDevice getPhysicalDevice() const @@ -8126,9 +8160,9 @@ namespace VULKAN_HPP_NAMESPACE DisplayModeKHR( DisplayModeKHR const & rhs ) : m_displayModeKHR( rhs.m_displayModeKHR ), m_dispatcher( rhs.m_dispatcher ) {} DisplayModeKHR( DisplayModeKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_physicalDevice( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) - , 
m_displayModeKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_physicalDevice( VULKAN_HPP_NAMESPACE::exchange( rhs.m_physicalDevice, {} ) ) + , m_displayModeKHR( VULKAN_HPP_NAMESPACE::exchange( rhs.m_displayModeKHR, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8171,7 +8205,7 @@ namespace VULKAN_HPP_NAMESPACE { m_physicalDevice = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_displayModeKHR, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_displayModeKHR, nullptr ); } VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::InstanceDispatcher const * getDispatcher() const @@ -8238,10 +8272,10 @@ namespace VULKAN_HPP_NAMESPACE Event( Event const & ) = delete; Event( Event && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_event, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_event( VULKAN_HPP_NAMESPACE::exchange( rhs.m_event, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8287,7 +8321,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_event, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_event, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -8384,10 +8418,10 @@ namespace VULKAN_HPP_NAMESPACE Fence( Fence const & ) = delete; Fence( Fence && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_fence, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_fence( VULKAN_HPP_NAMESPACE::exchange( rhs.m_fence, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8433,7 +8467,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_fence, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_fence, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -8507,10 +8541,10 @@ namespace VULKAN_HPP_NAMESPACE Framebuffer( Framebuffer const & ) = delete; Framebuffer( Framebuffer && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_framebuffer, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , 
m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_framebuffer( VULKAN_HPP_NAMESPACE::exchange( rhs.m_framebuffer, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8556,7 +8590,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_framebuffer, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_framebuffer, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -8630,10 +8664,10 @@ namespace VULKAN_HPP_NAMESPACE Image( Image const & ) = delete; Image( Image && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_image, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_image( VULKAN_HPP_NAMESPACE::exchange( rhs.m_image, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8679,7 +8713,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_image, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_image, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -8782,10 +8816,10 @@ namespace VULKAN_HPP_NAMESPACE ImageView( ImageView const & ) = delete; ImageView( ImageView && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_imageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_imageView, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_imageView( VULKAN_HPP_NAMESPACE::exchange( rhs.m_imageView, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8831,7 +8865,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_imageView, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_imageView, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -8905,10 +8939,10 @@ namespace VULKAN_HPP_NAMESPACE IndirectCommandsLayoutNV( IndirectCommandsLayoutNV const & ) = delete; IndirectCommandsLayoutNV( IndirectCommandsLayoutNV && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_indirectCommandsLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_indirectCommandsLayout, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, 
{} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_indirectCommandsLayout( VULKAN_HPP_NAMESPACE::exchange( rhs.m_indirectCommandsLayout, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -8955,7 +8989,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_indirectCommandsLayout, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_indirectCommandsLayout, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -9025,10 +9059,10 @@ namespace VULKAN_HPP_NAMESPACE MicromapEXT( MicromapEXT const & ) = delete; MicromapEXT( MicromapEXT && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_micromap( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_micromap, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_micromap( VULKAN_HPP_NAMESPACE::exchange( rhs.m_micromap, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -9074,7 +9108,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_micromap, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_micromap, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -9144,10 +9178,10 @@ namespace VULKAN_HPP_NAMESPACE OpticalFlowSessionNV( OpticalFlowSessionNV const & ) = delete; OpticalFlowSessionNV( OpticalFlowSessionNV && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_session( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_session, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_session( VULKAN_HPP_NAMESPACE::exchange( rhs.m_session, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -9194,7 +9228,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_session, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_session, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -9264,9 +9298,9 @@ namespace VULKAN_HPP_NAMESPACE PerformanceConfigurationINTEL( PerformanceConfigurationINTEL const & ) = delete; PerformanceConfigurationINTEL( PerformanceConfigurationINTEL && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_configuration( 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_configuration, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_configuration( VULKAN_HPP_NAMESPACE::exchange( rhs.m_configuration, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -9309,7 +9343,7 @@ namespace VULKAN_HPP_NAMESPACE { m_device = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_configuration, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_configuration, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -9377,10 +9411,10 @@ namespace VULKAN_HPP_NAMESPACE PipelineCache( PipelineCache const & ) = delete; PipelineCache( PipelineCache && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_pipelineCache( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineCache, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_pipelineCache( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineCache, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -9427,7 +9461,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_pipelineCache, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_pipelineCache, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -9549,11 +9583,11 @@ namespace VULKAN_HPP_NAMESPACE Pipeline( Pipeline const & ) = delete; Pipeline( Pipeline && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_pipeline( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipeline, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_constructorSuccessCode( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_constructorSuccessCode, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_pipeline( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipeline, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_constructorSuccessCode( VULKAN_HPP_NAMESPACE::exchange( rhs.m_constructorSuccessCode, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -9602,7 +9636,7 @@ namespace VULKAN_HPP_NAMESPACE m_allocator = nullptr; m_constructorSuccessCode = VULKAN_HPP_NAMESPACE::Result::eErrorUnknown; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_pipeline, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_pipeline, nullptr ); } VULKAN_HPP_NAMESPACE::Result getConstructorSuccessCode() const @@ -9750,6 +9784,157 @@ namespace VULKAN_HPP_NAMESPACE } }; + class PipelineBinaryKHR + { + public: + using CType = VkPipelineBinaryKHR; + using CppType 
= VULKAN_HPP_NAMESPACE::PipelineBinaryKHR;
+
+      static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePipelineBinaryKHR;
+      static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType =
+        VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown;
+
+    public:
+      PipelineBinaryKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
+                         VkPipelineBinaryKHR pipelineBinary,
+                         VULKAN_HPP_NAMESPACE::Optional<const VULKAN_HPP_NAMESPACE::AllocationCallbacks> allocator = nullptr,
+                         VULKAN_HPP_NAMESPACE::Result successCode = VULKAN_HPP_NAMESPACE::Result::eSuccess )
+        : m_device( device )
+        , m_pipelineBinary( pipelineBinary )
+        , m_allocator( static_cast<const VULKAN_HPP_NAMESPACE::AllocationCallbacks *>( allocator ) )
+        , m_constructorSuccessCode( successCode )
+        , m_dispatcher( device.getDispatcher() )
+      {
+      }
+
+      PipelineBinaryKHR( std::nullptr_t ) {}
+
+      ~PipelineBinaryKHR()
+      {
+        clear();
+      }
+
+      PipelineBinaryKHR() = delete;
+      PipelineBinaryKHR( PipelineBinaryKHR const & ) = delete;
+
+      PipelineBinaryKHR( PipelineBinaryKHR && rhs ) VULKAN_HPP_NOEXCEPT
+        : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) )
+        , m_pipelineBinary( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineBinary, {} ) )
+        , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) )
+        , m_constructorSuccessCode( VULKAN_HPP_NAMESPACE::exchange( rhs.m_constructorSuccessCode, {} ) )
+        , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) )
+      {
+      }
+
+      PipelineBinaryKHR & operator=( PipelineBinaryKHR const & ) = delete;
+
+      PipelineBinaryKHR & operator=( PipelineBinaryKHR && rhs ) VULKAN_HPP_NOEXCEPT
+      {
+        if ( this != &rhs )
+        {
+          std::swap( m_device, rhs.m_device );
+          std::swap( m_pipelineBinary, rhs.m_pipelineBinary );
+          std::swap( m_allocator, rhs.m_allocator );
+          std::swap( m_constructorSuccessCode, rhs.m_constructorSuccessCode );
+          std::swap( m_dispatcher, rhs.m_dispatcher );
+        }
+        return *this;
+      }
+
+      VULKAN_HPP_NAMESPACE::PipelineBinaryKHR const & operator*() const VULKAN_HPP_NOEXCEPT
+      {
+        return m_pipelineBinary;
+      }
+
+      operator VULKAN_HPP_NAMESPACE::PipelineBinaryKHR() const VULKAN_HPP_NOEXCEPT
+      {
+        return m_pipelineBinary;
+      }
+
+      void clear() VULKAN_HPP_NOEXCEPT
+      {
+        if ( m_pipelineBinary )
+        {
+          getDispatcher()->vkDestroyPipelineBinaryKHR( static_cast<VkDevice>( m_device ),
+                                                       static_cast<VkPipelineBinaryKHR>( m_pipelineBinary ),
+                                                       reinterpret_cast<const VkAllocationCallbacks *>( m_allocator ) );
+        }
+        m_device                 = nullptr;
+        m_pipelineBinary         = nullptr;
+        m_allocator              = nullptr;
+        m_constructorSuccessCode = VULKAN_HPP_NAMESPACE::Result::eErrorUnknown;
+        m_dispatcher             = nullptr;
+      }
+
+      VULKAN_HPP_NAMESPACE::PipelineBinaryKHR release()
+      {
+        m_device                 = nullptr;
+        m_allocator              = nullptr;
+        m_constructorSuccessCode = VULKAN_HPP_NAMESPACE::Result::eErrorUnknown;
+        m_dispatcher             = nullptr;
+        return VULKAN_HPP_NAMESPACE::exchange( m_pipelineBinary, nullptr );
+      }
+
+      VULKAN_HPP_NAMESPACE::Result getConstructorSuccessCode() const
+      {
+        return m_constructorSuccessCode;
+      }
+
+      VULKAN_HPP_NAMESPACE::Device getDevice() const
+      {
+        return m_device;
+      }
+
+      VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeviceDispatcher const * getDispatcher() const
+      {
+        VULKAN_HPP_ASSERT( m_dispatcher->getVkHeaderVersion() == VK_HEADER_VERSION );
+        return m_dispatcher;
+      }
+
+      void swap( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PipelineBinaryKHR & rhs ) VULKAN_HPP_NOEXCEPT
+      {
+        std::swap( m_device, rhs.m_device );
+        std::swap( m_pipelineBinary, rhs.m_pipelineBinary );
+        std::swap( m_allocator, rhs.m_allocator );
+        std::swap( m_constructorSuccessCode, rhs.m_constructorSuccessCode );
+        std::swap( m_dispatcher, rhs.m_dispatcher );
+      }
+
+    private:
+      VULKAN_HPP_NAMESPACE::Device                                              m_device                 = {};
+      VULKAN_HPP_NAMESPACE::PipelineBinaryKHR                                   m_pipelineBinary         = {};
+      const VULKAN_HPP_NAMESPACE::AllocationCallbacks *                         m_allocator              = {};
+      VULKAN_HPP_NAMESPACE::Result                                              m_constructorSuccessCode = VULKAN_HPP_NAMESPACE::Result::eErrorUnknown;
+      VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeviceDispatcher const * m_dispatcher             = nullptr;
+    };
+
+    class PipelineBinaryKHRs : public std::vector<VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PipelineBinaryKHR>
+    {
+    public:
+# if !defined( VULKAN_HPP_RAII_NO_EXCEPTIONS )
+      PipelineBinaryKHRs( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
+                          VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR const & createInfo,
+                          VULKAN_HPP_NAMESPACE::Optional<const VULKAN_HPP_NAMESPACE::AllocationCallbacks> allocator = nullptr )
+      {
+        *this = device.createPipelineBinariesKHR( createInfo, allocator );
+      }
+# endif
+
+      PipelineBinaryKHRs( std::nullptr_t ) {}
+
+      PipelineBinaryKHRs() = delete;
+      PipelineBinaryKHRs( PipelineBinaryKHRs const & ) = delete;
+      PipelineBinaryKHRs( PipelineBinaryKHRs && rhs ) = default;
+      PipelineBinaryKHRs & operator=( PipelineBinaryKHRs const & ) = delete;
+      PipelineBinaryKHRs & operator=( PipelineBinaryKHRs && rhs ) = default;
+
+    private:
+      PipelineBinaryKHRs( std::vector<VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PipelineBinaryKHR> && rhs )
+      {
+        std::swap( *this, rhs );
+      }
+    };
+
     class PipelineLayout
     {
     public:
@@ -9791,10 +9976,10 @@ namespace VULKAN_HPP_NAMESPACE
       PipelineLayout( PipelineLayout const & ) = delete;
 
       PipelineLayout( PipelineLayout && rhs ) VULKAN_HPP_NOEXCEPT
-        : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) )
-        , m_pipelineLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ) )
-        , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) )
-        , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) )
+        : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) )
+        , m_pipelineLayout( VULKAN_HPP_NAMESPACE::exchange( rhs.m_pipelineLayout, {} ) )
+        , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) )
+        , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) )
       {
       }
 
@@ -9841,7 +10026,7 @@ namespace VULKAN_HPP_NAMESPACE
        m_device     = nullptr;
        m_allocator  = nullptr;
        m_dispatcher = nullptr;
-        return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_pipelineLayout, nullptr );
+        return VULKAN_HPP_NAMESPACE::exchange( m_pipelineLayout, nullptr );
      }
 
      VULKAN_HPP_NAMESPACE::Device getDevice() const
@@ -9911,10 +10096,10 @@ namespace VULKAN_HPP_NAMESPACE
      PrivateDataSlot( PrivateDataSlot const & ) = delete;
 
      PrivateDataSlot( PrivateDataSlot && rhs ) VULKAN_HPP_NOEXCEPT
-        : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) )
-        , m_privateDataSlot( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_privateDataSlot, {} ) )
-        , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) )
-        , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) )
+        : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) )
+        , m_privateDataSlot( VULKAN_HPP_NAMESPACE::exchange( rhs.m_privateDataSlot, {} ) )
+        , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) )
+        , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) )
      {
      }
 
@@ -9961,7 +10146,7 @@ namespace
VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_privateDataSlot, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_privateDataSlot, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -10031,10 +10216,10 @@ namespace VULKAN_HPP_NAMESPACE QueryPool( QueryPool const & ) = delete; QueryPool( QueryPool && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_queryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queryPool, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_queryPool( VULKAN_HPP_NAMESPACE::exchange( rhs.m_queryPool, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10080,7 +10265,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_queryPool, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_queryPool, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -10175,8 +10360,8 @@ namespace VULKAN_HPP_NAMESPACE Queue( Queue const & rhs ) : m_queue( rhs.m_queue ), m_dispatcher( rhs.m_dispatcher ) {} Queue( Queue && rhs ) VULKAN_HPP_NOEXCEPT - : m_queue( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_queue, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_queue( VULKAN_HPP_NAMESPACE::exchange( rhs.m_queue, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10216,7 +10401,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Queue release() { m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_queue, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_queue, nullptr ); } VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::DeviceDispatcher const * getDispatcher() const @@ -10332,10 +10517,10 @@ namespace VULKAN_HPP_NAMESPACE RenderPass( RenderPass const & ) = delete; RenderPass( RenderPass && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_renderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_renderPass, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_renderPass( VULKAN_HPP_NAMESPACE::exchange( rhs.m_renderPass, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10381,7 +10566,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_renderPass, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_renderPass, nullptr ); } VULKAN_HPP_NAMESPACE::Device 
getDevice() const @@ -10459,10 +10644,10 @@ namespace VULKAN_HPP_NAMESPACE Sampler( Sampler const & ) = delete; Sampler( Sampler && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_sampler, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_sampler( VULKAN_HPP_NAMESPACE::exchange( rhs.m_sampler, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10508,7 +10693,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_sampler, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_sampler, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -10578,10 +10763,10 @@ namespace VULKAN_HPP_NAMESPACE SamplerYcbcrConversion( SamplerYcbcrConversion const & ) = delete; SamplerYcbcrConversion( SamplerYcbcrConversion && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_ycbcrConversion( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_ycbcrConversion, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_ycbcrConversion( VULKAN_HPP_NAMESPACE::exchange( rhs.m_ycbcrConversion, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10628,7 +10813,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_ycbcrConversion, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_ycbcrConversion, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -10698,10 +10883,10 @@ namespace VULKAN_HPP_NAMESPACE Semaphore( Semaphore const & ) = delete; Semaphore( Semaphore && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_semaphore, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_semaphore( VULKAN_HPP_NAMESPACE::exchange( rhs.m_semaphore, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10747,7 +10932,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_semaphore, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_semaphore, 
nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -10827,11 +11012,11 @@ namespace VULKAN_HPP_NAMESPACE ShaderEXT( ShaderEXT const & ) = delete; ShaderEXT( ShaderEXT && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_shader( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_shader, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_constructorSuccessCode( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_constructorSuccessCode, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_shader( VULKAN_HPP_NAMESPACE::exchange( rhs.m_shader, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_constructorSuccessCode( VULKAN_HPP_NAMESPACE::exchange( rhs.m_constructorSuccessCode, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -10880,7 +11065,7 @@ namespace VULKAN_HPP_NAMESPACE m_allocator = nullptr; m_constructorSuccessCode = VULKAN_HPP_NAMESPACE::Result::eErrorUnknown; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_shader, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_shader, nullptr ); } VULKAN_HPP_NAMESPACE::Result getConstructorSuccessCode() const @@ -10988,10 +11173,10 @@ namespace VULKAN_HPP_NAMESPACE ShaderModule( ShaderModule const & ) = delete; ShaderModule( ShaderModule && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_shaderModule( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_shaderModule, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_shaderModule( VULKAN_HPP_NAMESPACE::exchange( rhs.m_shaderModule, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -11037,7 +11222,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_shaderModule, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_shaderModule, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -11263,10 +11448,10 @@ namespace VULKAN_HPP_NAMESPACE SurfaceKHR( SurfaceKHR const & ) = delete; SurfaceKHR( SurfaceKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_instance( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_instance, {} ) ) - , m_surface( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_surface, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_instance( VULKAN_HPP_NAMESPACE::exchange( rhs.m_instance, {} ) ) + , m_surface( VULKAN_HPP_NAMESPACE::exchange( rhs.m_surface, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr 
) ) { } @@ -11312,7 +11497,7 @@ namespace VULKAN_HPP_NAMESPACE m_instance = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_surface, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_surface, nullptr ); } VULKAN_HPP_NAMESPACE::Instance getInstance() const @@ -11382,10 +11567,10 @@ namespace VULKAN_HPP_NAMESPACE SwapchainKHR( SwapchainKHR const & ) = delete; SwapchainKHR( SwapchainKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_swapchain( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_swapchain, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_swapchain( VULKAN_HPP_NAMESPACE::exchange( rhs.m_swapchain, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -11431,7 +11616,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_swapchain, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_swapchain, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -11500,7 +11685,7 @@ namespace VULKAN_HPP_NAMESPACE void setLatencyMarkerNV( const VULKAN_HPP_NAMESPACE::SetLatencyMarkerInfoNV & latencyMarkerInfo ) const VULKAN_HPP_NOEXCEPT; - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV getLatencyTimingsNV() const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD std::vector getLatencyTimingsNV() const; private: VULKAN_HPP_NAMESPACE::Device m_device = {}; @@ -11577,10 +11762,10 @@ namespace VULKAN_HPP_NAMESPACE ValidationCacheEXT( ValidationCacheEXT const & ) = delete; ValidationCacheEXT( ValidationCacheEXT && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_validationCache( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_validationCache, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_validationCache( VULKAN_HPP_NAMESPACE::exchange( rhs.m_validationCache, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -11627,7 +11812,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_validationCache, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_validationCache, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -11703,10 +11888,10 @@ namespace VULKAN_HPP_NAMESPACE VideoSessionKHR( VideoSessionKHR const & ) = delete; VideoSessionKHR( VideoSessionKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_videoSession( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_videoSession, {} ) 
) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_videoSession( VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSession, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -11753,7 +11938,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_videoSession, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_videoSession, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -11829,10 +12014,10 @@ namespace VULKAN_HPP_NAMESPACE VideoSessionParametersKHR( VideoSessionParametersKHR const & ) = delete; VideoSessionParametersKHR( VideoSessionParametersKHR && rhs ) VULKAN_HPP_NOEXCEPT - : m_device( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_device, {} ) ) - , m_videoSessionParameters( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_videoSessionParameters, {} ) ) - , m_allocator( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_allocator, {} ) ) - , m_dispatcher( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) + : m_device( VULKAN_HPP_NAMESPACE::exchange( rhs.m_device, {} ) ) + , m_videoSessionParameters( VULKAN_HPP_NAMESPACE::exchange( rhs.m_videoSessionParameters, {} ) ) + , m_allocator( VULKAN_HPP_NAMESPACE::exchange( rhs.m_allocator, {} ) ) + , m_dispatcher( VULKAN_HPP_NAMESPACE::exchange( rhs.m_dispatcher, nullptr ) ) { } @@ -11879,7 +12064,7 @@ namespace VULKAN_HPP_NAMESPACE m_device = nullptr; m_allocator = nullptr; m_dispatcher = nullptr; - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::exchange( m_videoSessionParameters, nullptr ); + return VULKAN_HPP_NAMESPACE::exchange( m_videoSessionParameters, nullptr ); } VULKAN_HPP_NAMESPACE::Device getDevice() const @@ -12018,7 +12203,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( usage ), static_cast( flags ), reinterpret_cast( &imageFormatProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); return imageFormatProperties; } @@ -12128,7 +12313,7 @@ namespace VULKAN_HPP_NAMESPACE layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { @@ -12159,7 +12344,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceExtensionProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { @@ -12185,7 +12370,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { @@ -12212,7 +12397,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateDeviceLayerProperties" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { @@ -12238,7 +12423,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueSubmit( static_cast( m_queue ), submits.size(), reinterpret_cast( submits.data() ), static_cast( fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" ); } VULKAN_HPP_INLINE void Queue::waitIdle() const @@ -12246,7 +12431,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_ASSERT( getDispatcher()->vkQueueWaitIdle && "Function requires " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueWaitIdle( static_cast( m_queue ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); } VULKAN_HPP_INLINE void Device::waitIdle() const @@ -12254,7 +12439,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_ASSERT( getDispatcher()->vkDeviceWaitIdle && "Function requires " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkDeviceWaitIdle( static_cast( m_device ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); } VULKAN_HPP_NODISCARD @@ -12293,7 +12478,7 @@ namespace 
VULKAN_HPP_NAMESPACE static_cast( size ), static_cast( flags ), &pData ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::mapMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::mapMemory" ); return pData; } @@ -12312,7 +12497,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkFlushMappedMemoryRanges( static_cast( m_device ), memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); } VULKAN_HPP_INLINE void @@ -12322,7 +12507,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkInvalidateMappedMemoryRanges( static_cast( m_device ), memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceSize DeviceMemory::getCommitment() const VULKAN_HPP_NOEXCEPT @@ -12345,7 +12530,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_buffer ), static_cast( memory ), static_cast( memoryOffset ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Buffer::bindMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Buffer::bindMemory" ); } VULKAN_HPP_INLINE void Image::bindMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset ) const @@ -12357,7 +12542,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_image ), static_cast( memory ), static_cast( memoryOffset ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Image::bindMemory" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Image::bindMemory" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Buffer::getMemoryRequirements() const VULKAN_HPP_NOEXCEPT @@ -12449,7 +12634,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueBindSparse( static_cast( m_queue ), bindInfo.size(), reinterpret_cast( bindInfo.data() ), static_cast( fence ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" ); } VULKAN_HPP_NODISCARD @@ -12481,7 +12666,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkResetFences( static_cast( m_device ), fences.size(), reinterpret_cast( fences.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Fence::getStatus() const @@ -12490,7 +12675,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetFenceStatus( static_cast( m_device ), static_cast( m_fence ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Fence::getStatus", { 
VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return static_cast( result ); @@ -12503,7 +12688,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkWaitForFences( static_cast( m_device ), fences.size(), reinterpret_cast( fences.data() ), static_cast( waitAll ), timeout ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitForFences", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); return static_cast( result ); @@ -12561,7 +12746,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetEventStatus( static_cast( m_device ), static_cast( m_event ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Event::getStatus", { VULKAN_HPP_NAMESPACE::Result::eEventSet, VULKAN_HPP_NAMESPACE::Result::eEventReset } ); return static_cast( result ); @@ -12573,7 +12758,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetEvent( static_cast( m_device ), static_cast( m_event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Event::set" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Event::set" ); } VULKAN_HPP_INLINE void Event::reset() const @@ -12582,7 +12767,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkResetEvent( static_cast( m_device ), static_cast( m_event ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Event::reset" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Event::reset" ); } VULKAN_HPP_NODISCARD @@ -12625,7 +12810,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( data.data() ), static_cast( stride ), static_cast( flags ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::QueryPool::getResults", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return std::make_pair( result, std::move( data ) ); @@ -12647,7 +12832,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &data ), static_cast( stride ), static_cast( flags ) ) ); - resultCheck( + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::QueryPool::getResult", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); return std::make_pair( result, std::move( data ) ); @@ -12826,7 +13011,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_device ), static_cast( m_pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::getData" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::getData" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize < data.size() ) { @@ -12844,7 +13029,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_pipelineCache ), srcCaches.size(), reinterpret_cast( srcCaches.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::merge" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PipelineCache::merge" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE @@ -12904,7 +13089,7 @@ namespace VULKAN_HPP_NAMESPACE # endif } 
- return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator ); + return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator, result ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE @@ -12964,7 +13149,7 @@ namespace VULKAN_HPP_NAMESPACE # endif } - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator ); + return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator, result ); } VULKAN_HPP_NODISCARD @@ -13200,7 +13385,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkResetCommandPool( static_cast( m_device ), static_cast( m_commandPool ), static_cast( flags ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandPool::reset" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandPool::reset" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE @@ -13237,7 +13422,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkBeginCommandBuffer( static_cast( m_commandBuffer ), reinterpret_cast( &beginInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" ); } VULKAN_HPP_INLINE void CommandBuffer::end() const @@ -13246,7 +13431,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkEndCommandBuffer( static_cast( m_commandBuffer ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" ); } VULKAN_HPP_INLINE void CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags ) const @@ -13255,7 +13440,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkResetCommandBuffer( static_cast( m_commandBuffer ), static_cast( flags ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" ); } VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, @@ -13793,7 +13978,7 @@ namespace VULKAN_HPP_NAMESPACE uint32_t apiVersion; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkEnumerateInstanceVersion( &apiVersion ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceVersion" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Context::enumerateInstanceVersion" ); return apiVersion; } @@ -13805,7 +13990,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkBindBufferMemory2( static_cast( m_device ), bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" ); } VULKAN_HPP_INLINE void Device::bindImageMemory2( VULKAN_HPP_NAMESPACE::ArrayProxy const & bindInfos ) const @@ -13814,7 +13999,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = 
     static_cast( getDispatcher()->vkBindImageMemory2( static_cast( m_device ), bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags
@@ -13874,7 +14059,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( physicalDeviceGroupProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroups" );
     VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
     if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
     {
@@ -14058,7 +14243,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2( static_cast( m_physicalDevice ), reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
     return imageFormatProperties;
   }
@@ -14076,7 +14261,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2( static_cast( m_physicalDevice ), reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" );
     return structureChain;
   }
@@ -14450,7 +14635,7 @@ namespace VULKAN_HPP_NAMESPACE
     uint64_t value;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSemaphoreCounterValue( static_cast( m_device ), static_cast( m_semaphore ), &value ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Semaphore::getCounterValue" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Semaphore::getCounterValue" );
     return value;
   }
@@ -14462,7 +14647,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkWaitSemaphores( static_cast( m_device ), reinterpret_cast( &waitInfo ), timeout ) );
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphores", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } );
     return static_cast( result );
@@ -14474,7 +14659,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSignalSemaphore( static_cast( m_device ), reinterpret_cast( &signalInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceAddress
@@ -14534,7 +14719,7 @@ namespace VULKAN_HPP_NAMESPACE
           static_cast( m_physicalDevice ), &toolCount, reinterpret_cast( toolProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolProperties" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
@@ -14576,7 +14761,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetPrivateData( static_cast( m_device ), static_cast( objectType_ ), objectHandle, static_cast( privateDataSlot ), data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateData" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateData" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint64_t Device::getPrivateData( VULKAN_HPP_NAMESPACE::ObjectType objectType_,
@@ -14654,7 +14839,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueSubmit2( static_cast( m_queue ), submits.size(), reinterpret_cast( submits.data() ), static_cast( fence ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2 & copyBufferInfo ) const VULKAN_HPP_NOEXCEPT
@@ -14978,7 +15163,7 @@ namespace VULKAN_HPP_NAMESPACE
       queueFamilyIndex, static_cast( surface ), reinterpret_cast( &supported ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" );
     return supported;
   }
@@ -14994,7 +15179,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilitiesKHR( static_cast( m_physicalDevice ), static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" );
     return surfaceCapabilities;
   }
@@ -15021,7 +15206,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( surfaceFormats.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormatsKHR" );
     VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
     if ( surfaceFormatCount < surfaceFormats.size() )
     {
@@ -15053,7 +15238,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( presentModes.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModesKHR" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
     {
@@ -15109,7 +15294,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( swapchainImages.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getImages" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getImages" );
     VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() );
     if ( swapchainImageCount < swapchainImages.size() )
     {
@@ -15131,12 +15316,12 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( semaphore ),
       static_cast( fence ),
       &imageIndex ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireNextImage",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eTimeout,
-        VULKAN_HPP_NAMESPACE::Result::eNotReady,
-        VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireNextImage",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eTimeout,
+        VULKAN_HPP_NAMESPACE::Result::eNotReady,
+        VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return std::make_pair( result, std::move( imageIndex ) );
   }
@@ -15147,7 +15332,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueuePresentKHR( static_cast( m_queue ), reinterpret_cast( &presentInfo ) ) );
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result, VULKAN_HPP_NAMESPACE_STRING "::Queue::presentKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return static_cast( result );
@@ -15161,7 +15346,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR deviceGroupPresentCapabilities;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetDeviceGroupPresentCapabilitiesKHR( static_cast( m_device ), reinterpret_cast( &deviceGroupPresentCapabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupPresentCapabilitiesKHR" );
     return deviceGroupPresentCapabilities;
   }
@@ -15175,7 +15360,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetDeviceGroupSurfacePresentModesKHR( static_cast( m_device ), static_cast( surface ), reinterpret_cast( &modes ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" );
     return modes;
   }
@@ -15203,7 +15388,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( rects.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getPresentRectanglesKHR" );
     VULKAN_HPP_ASSERT( rectCount <= rects.size() );
     if ( rectCount < rects.size() )
     {
@@ -15220,12 +15405,12 @@ namespace VULKAN_HPP_NAMESPACE
     uint32_t imageIndex;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireNextImage2KHR( static_cast( m_device ), reinterpret_cast( &acquireInfo ), &imageIndex ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eTimeout,
-        VULKAN_HPP_NAMESPACE::Result::eNotReady,
-        VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eTimeout,
+        VULKAN_HPP_NAMESPACE::Result::eNotReady,
+        VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return std::make_pair( result, std::move( imageIndex ) );
   }
@@ -15251,7 +15436,7 @@ namespace VULKAN_HPP_NAMESPACE
           static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPropertiesKHR" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -15279,7 +15464,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlanePropertiesKHR" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -15345,7 +15530,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -15389,7 +15574,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_displayModeKHR ), planeIndex, reinterpret_cast( &capabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayModeKHR::getDisplayPlaneCapabilities" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayModeKHR::getDisplayPlaneCapabilities" );
     return capabilities;
   }
@@ -15717,7 +15902,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkDebugMarkerSetObjectTagEXT( static_cast( m_device ), reinterpret_cast( &tagInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" );
   }

   VULKAN_HPP_INLINE void Device::debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT & nameInfo ) const
@@ -15726,7 +15911,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkDebugMarkerSetObjectNameEXT( static_cast( m_device ), reinterpret_cast( &nameInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT
@@ -15765,7 +15950,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceVideoCapabilitiesKHR( static_cast( m_physicalDevice ), reinterpret_cast( &videoProfile ), reinterpret_cast( &capabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
     return capabilities;
   }
@@ -15783,7 +15968,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceVideoCapabilitiesKHR( static_cast( m_physicalDevice ), reinterpret_cast( &videoProfile ), reinterpret_cast( &capabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoCapabilitiesKHR" );
     return structureChain;
   }
@@ -15814,7 +15999,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( videoFormatProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoFormatPropertiesKHR" );
     VULKAN_HPP_ASSERT( videoFormatPropertyCount <= videoFormatProperties.size() );
     if ( videoFormatPropertyCount < videoFormatProperties.size() )
     {
@@ -15888,7 +16073,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_videoSession ), bindSessionMemoryInfos.size(), reinterpret_cast( bindSessionMemoryInfos.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::VideoSessionKHR::bindMemory" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::VideoSessionKHR::bindMemory" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE
@@ -15924,7 +16109,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkUpdateVideoSessionParametersKHR( static_cast( m_device ), static_cast( m_videoSessionParameters ), reinterpret_cast( &updateInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::VideoSessionParametersKHR::update" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::VideoSessionParametersKHR::update" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::beginVideoCodingKHR( const VULKAN_HPP_NAMESPACE::VideoBeginCodingInfoKHR & beginInfo ) const VULKAN_HPP_NOEXCEPT
@@ -16151,7 +16336,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX properties;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetImageViewAddressNVX( static_cast( m_device ), static_cast( m_imageView ), reinterpret_cast( &properties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ImageView::getAddressNVX" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ImageView::getAddressNVX" );
     return properties;
   }
@@ -16226,7 +16411,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( info.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getShaderInfoAMD" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getShaderInfoAMD" );
     VULKAN_HPP_ASSERT( infoSize <= info.size() );
     if ( infoSize < info.size() )
     {
@@ -16303,7 +16488,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( flags ),
       static_cast( externalHandleType ),
       reinterpret_cast( &externalImageFormatProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" );
     return externalImageFormatProperties;
   }
@@ -16318,7 +16503,7 @@ namespace VULKAN_HPP_NAMESPACE
     HANDLE handle;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetMemoryWin32HandleNV( static_cast( m_device ), static_cast( m_memory ), static_cast( handleType ), &handle ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::getMemoryWin32HandleNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DeviceMemory::getMemoryWin32HandleNV" );
     return handle;
   }
@@ -16417,7 +16602,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2KHR( static_cast( m_physicalDevice ), reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
     return imageFormatProperties;
   }
@@ -16435,7 +16620,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceImageFormatProperties2KHR( static_cast( m_physicalDevice ), reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" );
     return structureChain;
   }
@@ -16647,7 +16832,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( physicalDeviceGroupProperties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Instance::enumeratePhysicalDeviceGroupsKHR" );
     VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() );
     if ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() )
     {
@@ -16683,7 +16868,7 @@ namespace VULKAN_HPP_NAMESPACE
     HANDLE handle;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetMemoryWin32HandleKHR( static_cast( m_device ), reinterpret_cast( &getWin32HandleInfo ), &handle ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" );
     return handle;
   }
@@ -16700,7 +16885,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( handleType ), handle, reinterpret_cast( &memoryWin32HandleProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" );
     return memoryWin32HandleProperties;
   }
@@ -16715,7 +16900,7 @@ namespace VULKAN_HPP_NAMESPACE
     int fd;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetMemoryFdKHR( static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" );
     return fd;
   }
@@ -16731,7 +16916,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( handleType ), fd, reinterpret_cast( &memoryFdProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" );
     return memoryFdProperties;
   }
@@ -16764,7 +16949,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkImportSemaphoreWin32HandleKHR( static_cast( m_device ), reinterpret_cast( &importSemaphoreWin32HandleInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreWin32HandleKHR" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE HANDLE
@@ -16776,7 +16961,7 @@ namespace VULKAN_HPP_NAMESPACE
     HANDLE handle;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSemaphoreWin32HandleKHR( static_cast( m_device ), reinterpret_cast( &getWin32HandleInfo ), &handle ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" );
     return handle;
   }
@@ -16790,7 +16975,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkImportSemaphoreFdKHR( static_cast( m_device ), reinterpret_cast( &importSemaphoreFdInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE int Device::getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR & getFdInfo ) const
@@ -16800,7 +16985,7 @@ namespace VULKAN_HPP_NAMESPACE
     int fd;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSemaphoreFdKHR( static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" );
     return fd;
   }
@@ -16935,7 +17120,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireXlibDisplayEXT( static_cast( m_physicalDevice ), &dpy, static_cast( display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" );
   }

   VULKAN_HPP_NODISCARD
@@ -16971,7 +17156,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2EXT( static_cast( m_physicalDevice ), static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" );
     return surfaceCapabilities;
   }
@@ -16985,7 +17170,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkDisplayPowerControlEXT( static_cast( m_device ), static_cast( display ), reinterpret_cast( &displayPowerInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" );
   }

   VULKAN_HPP_NODISCARD
@@ -17045,7 +17230,7 @@ namespace VULKAN_HPP_NAMESPACE
     uint64_t counterValue;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSwapchainCounterEXT( static_cast( m_device ), static_cast( m_swapchain ), static_cast( counter ), &counterValue ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getCounterEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getCounterEXT" );
     return counterValue;
   }
@@ -17061,7 +17246,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetRefreshCycleDurationGOOGLE( static_cast( m_device ), static_cast( m_swapchain ), reinterpret_cast( &displayTimingProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getRefreshCycleDurationGOOGLE" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getRefreshCycleDurationGOOGLE" );
     return displayTimingProperties;
   }
@@ -17088,7 +17273,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( presentationTimings.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getPastPresentationTimingGOOGLE" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getPastPresentationTimingGOOGLE" );
     VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() );
     if ( presentationTimingCount < presentationTimings.size() )
     {
@@ -17212,9 +17397,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSwapchainStatusKHR( static_cast( m_device ), static_cast( m_swapchain ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getStatus",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::getStatus",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return static_cast( result );
   }
@@ -17244,7 +17429,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkImportFenceWin32HandleKHR( static_cast( m_device ), reinterpret_cast( &importFenceWin32HandleInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE HANDLE
@@ -17255,7 +17440,7 @@ namespace VULKAN_HPP_NAMESPACE
     HANDLE handle;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetFenceWin32HandleKHR( static_cast( m_device ), reinterpret_cast( &getWin32HandleInfo ), &handle ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" );
     return handle;
   }
@@ -17269,7 +17454,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkImportFenceFdKHR( static_cast( m_device ), reinterpret_cast( &importFenceFdInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE int Device::getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR & getFdInfo ) const
@@ -17279,7 +17464,7 @@ namespace VULKAN_HPP_NAMESPACE
     int fd;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetFenceFdKHR( static_cast( m_device ), reinterpret_cast( &getFdInfo ), &fd ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" );
     return fd;
   }
@@ -17314,7 +17499,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( counterDescriptions.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" );
     VULKAN_HPP_ASSERT( counterCount <= counters.size() );
     if ( counterCount < counters.size() )
     {
@@ -17345,7 +17530,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireProfilingLockKHR( static_cast( m_device ), reinterpret_cast( &info ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" );
   }

   VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR() const VULKAN_HPP_NOEXCEPT
@@ -17368,7 +17553,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2KHR( static_cast( m_physicalDevice ), reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
     return surfaceCapabilities;
   }
@@ -17386,7 +17571,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetPhysicalDeviceSurfaceCapabilities2KHR( static_cast( m_physicalDevice ), reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" );
     return structureChain;
   }
@@ -17417,7 +17602,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( surfaceFormats.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
     VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
     if ( surfaceFormatCount < surfaceFormats.size() )
     {
@@ -17459,7 +17644,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( surfaceFormats.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceFormats2KHR" );
     VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() );
     if ( surfaceFormatCount < surfaceFormats.size() )
     {
@@ -17493,7 +17678,7 @@ namespace VULKAN_HPP_NAMESPACE
           static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayProperties2KHR" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -17521,7 +17706,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneProperties2KHR" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -17552,7 +17737,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties2" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::getModeProperties2" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -17572,7 +17757,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetDisplayPlaneCapabilities2KHR( static_cast( m_physicalDevice ), reinterpret_cast( &displayPlaneInfo ), reinterpret_cast( &capabilities ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" );
     return capabilities;
   }
@@ -17641,7 +17826,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetDebugUtilsObjectNameEXT( static_cast( m_device ), reinterpret_cast( &nameInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" );
   }

   VULKAN_HPP_INLINE void Device::setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT & tagInfo ) const
@@ -17650,7 +17835,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetDebugUtilsObjectTagEXT( static_cast( m_device ), reinterpret_cast( &tagInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" );
   }

   VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT & labelInfo ) const VULKAN_HPP_NOEXCEPT
@@ -17747,7 +17932,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID properties;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetAndroidHardwareBufferPropertiesANDROID( static_cast( m_device ), &buffer, reinterpret_cast( &properties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" );
     return properties;
   }
@@ -17764,7 +17949,7 @@ namespace VULKAN_HPP_NAMESPACE
       structureChain.template get();
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetAndroidHardwareBufferPropertiesANDROID( static_cast( m_device ), &buffer, reinterpret_cast( &properties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" );
     return structureChain;
   }
@@ -17778,7 +17963,7 @@ namespace VULKAN_HPP_NAMESPACE
     struct AHardwareBuffer * buffer;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetMemoryAndroidHardwareBufferANDROID( static_cast( m_device ), reinterpret_cast( &info ), &buffer ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" );
     return buffer;
   }
@@ -17844,7 +18029,7 @@ namespace VULKAN_HPP_NAMESPACE
 # endif
     }

-    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator );
+    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator, result );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExecutionGraphPipelineScratchSizeAMDX Pipeline::getExecutionGraphScratchSizeAMDX() const
@@ -17855,7 +18040,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::ExecutionGraphPipelineScratchSizeAMDX sizeInfo;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetExecutionGraphPipelineScratchSizeAMDX( static_cast( m_device ), static_cast( m_pipeline ), reinterpret_cast( &sizeInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getExecutionGraphScratchSizeAMDX" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getExecutionGraphScratchSizeAMDX" );
     return sizeInfo;
   }
@@ -17872,7 +18057,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_pipeline ), reinterpret_cast( &nodeInfo ), &nodeIndex ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getExecutionGraphNodeIndexAMDX" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getExecutionGraphNodeIndexAMDX" );
     return nodeIndex;
   }
@@ -18135,11 +18320,11 @@ namespace VULKAN_HPP_NAMESPACE
       infos.size(),
       reinterpret_cast( infos.data() ),
       reinterpret_cast( pBuildRangeInfos.data() ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
+      result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
     return static_cast( result );
   }
@@ -18155,11 +18340,11 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkCopyAccelerationStructureKHR( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
     return static_cast( result );
   }
@@ -18175,11 +18360,11 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkCopyAccelerationStructureToMemoryKHR( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
     return static_cast( result );
   }
@@ -18195,11 +18380,11 @@ namespace VULKAN_HPP_NAMESPACE
      getDispatcher()->vkCopyMemoryToAccelerationStructureKHR( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
-        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
-        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result,
+      VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess,
+        VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR,
+        VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } );
     return static_cast( result );
   }
@@ -18224,7 +18409,7 @@ namespace VULKAN_HPP_NAMESPACE
       data.size() * sizeof( DataType ),
       reinterpret_cast( data.data() ),
       stride ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" );
     return data;
   }
@@ -18247,7 +18432,7 @@ namespace VULKAN_HPP_NAMESPACE
       sizeof( DataType ),
      reinterpret_cast( &data ),
      stride ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" );
     return data;
   }
@@ -18436,7 +18621,7 @@ namespace VULKAN_HPP_NAMESPACE
 # endif
     }

-    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator );
+    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator, result );
   }

   template
@@ -18455,7 +18640,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       data.size() * sizeof( DataType ),
       reinterpret_cast( data.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandlesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandlesKHR" );
     return data;
   }
@@ -18474,7 +18659,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       sizeof( DataType ),
       reinterpret_cast( &data ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandleKHR" );
     return data;
   }
@@ -18495,7 +18680,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       data.size() * sizeof( DataType ),
       reinterpret_cast( data.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandlesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandlesKHR" );
     return data;
   }
@@ -18514,7 +18699,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       sizeof( DataType ),
       reinterpret_cast( &data ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandleKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingCaptureReplayShaderGroupHandleKHR" );
     return data;
   }
@@ -18604,7 +18789,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkBindBufferMemory2KHR( static_cast( m_device ), bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" );
   }

   VULKAN_HPP_INLINE void
@@ -18614,7 +18799,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkBindImageMemory2KHR( static_cast( m_device ), bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" );
   }

   //=== VK_EXT_image_drm_format_modifier ===
@@ -18627,7 +18812,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT properties;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetImageDrmFormatModifierPropertiesEXT( static_cast( m_device ), static_cast( m_image ), reinterpret_cast( &properties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Image::getDrmFormatModifierPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Image::getDrmFormatModifierPropertiesEXT" );
     return properties;
   }
@@ -18668,7 +18853,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_validationCache ), srcCaches.size(), reinterpret_cast( srcCaches.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::merge" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::merge" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector ValidationCacheEXT::getData() const
@@ -18689,7 +18874,7 @@ namespace VULKAN_HPP_NAMESPACE
           static_cast( m_device ), static_cast( m_validationCache ), &dataSize, reinterpret_cast( data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::getData" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ValidationCacheEXT::getData" );
     VULKAN_HPP_ASSERT( dataSize <= data.size() );
     if ( dataSize < data.size() )
     {
@@ -18799,7 +18984,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkBindAccelerationStructureMemoryNV( static_cast( m_device ), bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV & info,
@@ -18927,7 +19112,7 @@ namespace VULKAN_HPP_NAMESPACE
 # endif
     }

-    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator );
+    return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Pipeline( *this, *reinterpret_cast( &pipeline ), allocator, result );
   }

   template
@@ -18946,7 +19131,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       data.size() * sizeof( DataType ),
       reinterpret_cast( data.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandlesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandlesNV" );
     return data;
   }
@@ -18965,7 +19150,7 @@ namespace VULKAN_HPP_NAMESPACE
       groupCount,
       sizeof( DataType ),
       reinterpret_cast( &data ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandleNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::getRayTracingShaderGroupHandleNV" );
     return data;
   }
@@ -18982,7 +19167,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_accelerationStructure ), data.size() * sizeof( DataType ), reinterpret_cast( data.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" );
     return data;
   }
@@ -18998,7 +19183,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_accelerationStructure ), sizeof( DataType ), reinterpret_cast( &data ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureNV::getHandle" );
     return data;
   }
@@ -19026,7 +19211,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCompileDeferredNV( static_cast( m_device ), static_cast( m_pipeline ), shader ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::compileDeferredNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Pipeline::compileDeferredNV" );
   }

   //=== VK_KHR_maintenance3 ===
@@ -19116,7 +19301,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( handleType ), pHostPointer, reinterpret_cast( &memoryHostPointerProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" );
     return memoryHostPointerProperties;
   }
@@ -19159,7 +19344,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_physicalDevice ), &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsEXT" );
     VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() );
     if ( timeDomainCount < timeDomains.size() )
     {
@@ -19183,7 +19368,7 @@ namespace VULKAN_HPP_NAMESPACE
       reinterpret_cast( timestampInfos.data() ),
       timestamps.data(),
       &maxDeviation ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" );
     return data_;
   }
@@ -19199,7 +19384,7 @@ namespace VULKAN_HPP_NAMESPACE
     uint64_t & maxDeviation = data_.second;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetCalibratedTimestampsEXT( static_cast( m_device ), 1, reinterpret_cast( &timestampInfo ), &timestamp, &maxDeviation ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampEXT" );
     return data_;
   }
@@ -19308,7 +19493,7 @@ namespace VULKAN_HPP_NAMESPACE
     uint64_t value;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSemaphoreCounterValueKHR( static_cast( m_device ), static_cast( m_semaphore ), &value ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Semaphore::getCounterValueKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Semaphore::getCounterValueKHR" );
     return value;
   }
@@ -19320,7 +19505,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkWaitSemaphoresKHR( static_cast( m_device ), reinterpret_cast( &waitInfo ), timeout ) );
-    resultCheck(
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
       result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphoresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } );
     return static_cast( result );
@@ -19332,7 +19517,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSignalSemaphoreKHR( static_cast( m_device ), reinterpret_cast( &signalInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" );
   }

   //=== VK_INTEL_performance_query ===
@@ -19344,7 +19529,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkInitializePerformanceApiINTEL( static_cast( m_device ), reinterpret_cast( &initializeInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" );
   }

   VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL() const VULKAN_HPP_NOEXCEPT
@@ -19361,7 +19546,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCmdSetPerformanceMarkerINTEL( static_cast( m_commandBuffer ), reinterpret_cast( &markerInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL & markerInfo ) const
@@ -19371,7 +19556,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCmdSetPerformanceStreamMarkerINTEL( static_cast( m_commandBuffer ), reinterpret_cast( &markerInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL & overrideInfo ) const
@@ -19381,7 +19566,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCmdSetPerformanceOverrideINTEL( static_cast( m_commandBuffer ), reinterpret_cast( &overrideInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE
@@ -19414,7 +19599,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueSetPerformanceConfigurationINTEL( static_cast( m_queue ), static_cast( configuration ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PerformanceValueINTEL
@@ -19425,7 +19610,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::PerformanceValueINTEL value;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetPerformanceParameterINTEL( static_cast( m_device ), static_cast( parameter ), reinterpret_cast( &value ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" );
     return value;
   }
@@ -19520,7 +19705,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( fragmentShadingRates.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getFragmentShadingRatesKHR" );
     VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() );
     if ( fragmentShadingRateCount < fragmentShadingRates.size() )
     {
@@ -19554,13 +19739,13 @@ namespace VULKAN_HPP_NAMESPACE
   }

   VULKAN_HPP_INLINE void CommandBuffer::setRenderingInputAttachmentIndicesKHR(
-    const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & locationInfo ) const VULKAN_HPP_NOEXCEPT
+    const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT
   {
     VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR && "Function requires " );

-    getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR( static_cast( m_commandBuffer ),
-      reinterpret_cast( &locationInfo ) );
+    getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR(
+      static_cast( m_commandBuffer ), reinterpret_cast( &inputAttachmentIndexInfo ) );
   }

   //=== VK_EXT_buffer_device_address ===
@@ -19599,7 +19784,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_physicalDevice ), &toolCount, reinterpret_cast( toolProperties.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getToolPropertiesEXT" );
     VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
     if ( toolCount < toolProperties.size() )
     {
@@ -19616,9 +19801,10 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkWaitForPresentKHR( static_cast( m_device ), static_cast( m_swapchain ), presentId, timeout ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::waitForPresent",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
+      result,
+      VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::waitForPresent",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
     return static_cast( result );
   }
@@ -19645,7 +19831,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesNV" );
     VULKAN_HPP_ASSERT( propertyCount <= properties.size() );
     if ( propertyCount < properties.size() )
     {
@@ -19678,7 +19864,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( combinations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" );
     VULKAN_HPP_ASSERT( combinationCount <= combinations.size() );
     if ( combinationCount < combinations.size() )
     {
@@ -19716,7 +19902,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( presentModes.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfacePresentModes2EXT" );
     VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() );
     if ( presentModeCount < presentModes.size() )
     {
@@ -19732,7 +19918,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireFullScreenExclusiveModeEXT( static_cast( m_device ), static_cast( m_swapchain ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireFullScreenExclusiveModeEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::acquireFullScreenExclusiveModeEXT" );
   }

   VULKAN_HPP_INLINE void SwapchainKHR::releaseFullScreenExclusiveModeEXT() const
@@ -19742,7 +19928,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkReleaseFullScreenExclusiveModeEXT( static_cast( m_device ), static_cast( m_swapchain ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::releaseFullScreenExclusiveModeEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::releaseFullScreenExclusiveModeEXT" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR
@@ -19756,7 +19942,7 @@ namespace VULKAN_HPP_NAMESPACE
       getDispatcher()->vkGetDeviceGroupSurfacePresentModes2EXT( static_cast( m_device ), reinterpret_cast( &surfaceInfo ), reinterpret_cast( &modes ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" );
     return modes;
   }
@@ -20039,9 +20225,10 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkDeferredOperationJoinKHR( static_cast( m_device ), static_cast( m_operation ) ) );
-    resultCheck( result,
-      VULKAN_HPP_NAMESPACE_STRING "::DeferredOperationKHR::join",
-      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck(
+      result,
+      VULKAN_HPP_NAMESPACE_STRING "::DeferredOperationKHR::join",
+      { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } );
     return static_cast( result );
   }
@@ -20071,7 +20258,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( properties.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutablePropertiesKHR" );
     VULKAN_HPP_ASSERT( executableCount <= properties.size() );
     if ( executableCount < properties.size() )
     {
@@ -20103,7 +20290,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( statistics.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableStatisticsKHR" );
     VULKAN_HPP_ASSERT( statisticCount <= statistics.size() );
     if ( statisticCount < statistics.size() )
     {
@@ -20138,7 +20325,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( internalRepresentations.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineExecutableInternalRepresentationsKHR" );
     VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() );
     if ( internalRepresentationCount < internalRepresentations.size() )
     {
@@ -20155,7 +20342,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyMemoryToImageEXT( static_cast( m_device ), reinterpret_cast( &copyMemoryToImageInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" );
   }

   VULKAN_HPP_INLINE void Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT & copyImageToMemoryInfo ) const
@@ -20164,7 +20351,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyImageToMemoryEXT( static_cast( m_device ), reinterpret_cast( &copyImageToMemoryInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" );
   }

   VULKAN_HPP_INLINE void Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT & copyImageToImageInfo ) const
@@ -20173,7 +20360,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyImageToImageEXT( static_cast( m_device ), reinterpret_cast( &copyImageToImageInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" );
   }

   VULKAN_HPP_INLINE void Device::transitionImageLayoutEXT(
@@ -20183,7 +20370,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkTransitionImageLayoutEXT( static_cast( m_device ), transitions.size(), reinterpret_cast( transitions.data() ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR
@@ -20229,7 +20416,7 @@ namespace VULKAN_HPP_NAMESPACE
     void * pData;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkMapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryMapInfo ), &pData ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" );
     return pData;
   }
@@ -20240,7 +20427,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkUnmapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryUnmapInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" );
   }

   //=== VK_EXT_swapchain_maintenance1 ===
@@ -20251,7 +20438,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkReleaseSwapchainImagesEXT( static_cast( m_device ), reinterpret_cast( &releaseInfo ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseSwapchainImagesEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseSwapchainImagesEXT" );
   }

   //=== VK_NV_device_generated_commands ===
@@ -20363,7 +20550,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireDrmDisplayEXT( static_cast( m_physicalDevice ), drmFd, static_cast( display ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireDrmDisplayEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireDrmDisplayEXT" );
   }

   VULKAN_HPP_NODISCARD
@@ -20432,7 +20619,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetPrivateDataEXT( static_cast( m_device ), static_cast( objectType_ ), objectHandle, static_cast( privateDataSlot ), data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" );
   }

   VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint64_t Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_,
@@ -20461,7 +20648,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_physicalDevice ), reinterpret_cast( &qualityLevelInfo ), reinterpret_cast( &qualityLevelProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
     return qualityLevelProperties;
   }
@@ -20480,7 +20667,7 @@ namespace VULKAN_HPP_NAMESPACE
       static_cast( m_physicalDevice ), reinterpret_cast( &qualityLevelInfo ), reinterpret_cast( &qualityLevelProperties ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getVideoEncodeQualityLevelPropertiesKHR" );
     return structureChain;
   }
@@ -20515,7 +20702,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
     return data_;
   }
@@ -20552,7 +20739,7 @@ namespace VULKAN_HPP_NAMESPACE
           reinterpret_cast( data.data() ) ) );
       }
     } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getEncodedVideoSessionParametersKHR" );
     return data_;
   }
@@ -20609,7 +20796,7 @@ namespace VULKAN_HPP_NAMESPACE
          static_cast( m_device ), static_cast( m_module ), &cacheSize, reinterpret_cast( cacheData.data() ) ) );
      }
    } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CudaModuleNV::getCache" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::CudaModuleNV::getCache" );
     VULKAN_HPP_ASSERT( cacheSize <= cacheData.size() );
     if ( cacheSize < cacheData.size() )
     {
@@ -20743,7 +20930,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkQueueSubmit2KHR( static_cast( m_queue ), submits.size(), reinterpret_cast( submits.data() ), static_cast( fence ) ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2KHR" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit2KHR" );
   }

   VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarker2AMD( VULKAN_HPP_NAMESPACE::PipelineStageFlags2 stage,
@@ -20889,7 +21076,7 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetBufferOpaqueCaptureDescriptorDataEXT( static_cast( m_device ), reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getBufferOpaqueCaptureDescriptorDataEXT" );
     return data;
   }
@@ -20904,7 +21091,7 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetImageOpaqueCaptureDescriptorDataEXT( static_cast( m_device ), reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageOpaqueCaptureDescriptorDataEXT" );
     return data;
   }
@@ -20919,7 +21106,7 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetImageViewOpaqueCaptureDescriptorDataEXT( static_cast( m_device ), reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewOpaqueCaptureDescriptorDataEXT" );
     return data;
   }
@@ -20934,7 +21121,7 @@ namespace VULKAN_HPP_NAMESPACE
     DataType data;
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSamplerOpaqueCaptureDescriptorDataEXT( static_cast( m_device ), reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSamplerOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSamplerOpaqueCaptureDescriptorDataEXT" );
     return data;
   }
@@ -20950,7 +21137,7 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT( static_cast( m_device ), reinterpret_cast( &info ), &data ) );
-    resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureOpaqueCaptureDescriptorDataEXT" );
+    VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureOpaqueCaptureDescriptorDataEXT" );
     return data;
   }
@@ -21059,50 +21246,15 @@ namespace VULKAN_HPP_NAMESPACE
   }

   //=== VK_EXT_device_fault ===
-
-  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::pair
-  Device::getFaultInfoEXT() const
+  template
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFaultInfoEXT( VULKAN_HPP_NAMESPACE::DeviceFaultCountsEXT * pFaultCounts,
VULKAN_HPP_NAMESPACE::DeviceFaultInfoEXT * pFaultInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkGetDeviceFaultInfoEXT && "Function requires " ); - - std::pair data_; - VULKAN_HPP_NAMESPACE::DeviceFaultCountsEXT & faultCounts = data_.first; - VULKAN_HPP_NAMESPACE::DeviceFaultInfoEXT & faultInfo = data_.second; - VULKAN_HPP_NAMESPACE::Result result; - do - { - result = static_cast( - getDispatcher()->vkGetDeviceFaultInfoEXT( m_device, reinterpret_cast( &faultCounts ), nullptr ) ); - if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) - { - std::free( faultInfo.pAddressInfos ); - if ( faultCounts.addressInfoCount ) - { - faultInfo.pAddressInfos = reinterpret_cast( - std::malloc( faultCounts.addressInfoCount * sizeof( VULKAN_HPP_NAMESPACE::DeviceFaultAddressInfoEXT ) ) ); - } - std::free( faultInfo.pVendorInfos ); - if ( faultCounts.vendorInfoCount ) - { - faultInfo.pVendorInfos = reinterpret_cast( - std::malloc( faultCounts.vendorInfoCount * sizeof( VULKAN_HPP_NAMESPACE::DeviceFaultVendorInfoEXT ) ) ); - } - std::free( faultInfo.pVendorBinaryData ); - if ( faultCounts.vendorBinarySize ) - { - faultInfo.pVendorBinaryData = std::malloc( faultCounts.vendorBinarySize ); - } - result = static_cast( getDispatcher()->vkGetDeviceFaultInfoEXT( - m_device, reinterpret_cast( &faultCounts ), reinterpret_cast( &faultInfo ) ) ); - } - } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::getFaultInfoEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eIncomplete } ); - - return data_; + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkGetDeviceFaultInfoEXT( + m_device, reinterpret_cast( pFaultCounts ), reinterpret_cast( pFaultInfo ) ) ); } - # if defined( VK_USE_PLATFORM_WIN32_KHR ) //=== VK_NV_acquire_winrt_display === @@ -21112,7 +21264,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkAcquireWinrtDisplayNV( static_cast( m_physicalDevice ), static_cast( m_display ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::acquireWinrtNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::DisplayKHR::acquireWinrtNV" ); } VULKAN_HPP_NODISCARD @@ -21203,7 +21355,7 @@ namespace VULKAN_HPP_NAMESPACE zx_handle_t zirconHandle; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetMemoryZirconHandleFUCHSIA( static_cast( m_device ), reinterpret_cast( &getZirconHandleInfo ), &zirconHandle ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandleFUCHSIA" ); return zirconHandle; } @@ -21220,7 +21372,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( handleType ), zirconHandle, reinterpret_cast( &memoryZirconHandleProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandlePropertiesFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryZirconHandlePropertiesFUCHSIA" ); return memoryZirconHandleProperties; } @@ -21237,7 +21389,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkImportSemaphoreZirconHandleFUCHSIA( static_cast( m_device ), reinterpret_cast( &importSemaphoreZirconHandleInfo ) ) ); - 
resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreZirconHandleFUCHSIA" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE zx_handle_t @@ -21249,7 +21401,7 @@ namespace VULKAN_HPP_NAMESPACE zx_handle_t zirconHandle; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetSemaphoreZirconHandleFUCHSIA( static_cast( m_device ), reinterpret_cast( &getZirconHandleInfo ), &zirconHandle ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreZirconHandleFUCHSIA" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreZirconHandleFUCHSIA" ); return zirconHandle; } @@ -21292,7 +21444,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkSetBufferCollectionImageConstraintsFUCHSIA( static_cast( m_device ), static_cast( m_collection ), reinterpret_cast( &imageConstraintsInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::setImageConstraints" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::setImageConstraints" ); } VULKAN_HPP_INLINE void @@ -21305,7 +21457,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkSetBufferCollectionBufferConstraintsFUCHSIA( static_cast( m_device ), static_cast( m_collection ), reinterpret_cast( &bufferConstraintsInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::setBufferConstraints" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::setBufferConstraints" ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::BufferCollectionPropertiesFUCHSIA BufferCollectionFUCHSIA::getProperties() const @@ -21318,7 +21470,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkGetBufferCollectionPropertiesFUCHSIA( static_cast( m_device ), static_cast( m_collection ), reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::getProperties" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::BufferCollectionFUCHSIA::getProperties" ); return properties; } @@ -21334,7 +21486,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D maxWorkgroupSize; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI( static_cast( m_device ), static_cast( m_renderPass ), reinterpret_cast( &maxWorkgroupSize ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::RenderPass::getSubpassShadingMaxWorkgroupSizeHUAWEI" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::RenderPass::getSubpassShadingMaxWorkgroupSizeHUAWEI" ); return maxWorkgroupSize; } @@ -21369,7 +21521,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkGetMemoryRemoteAddressNV( static_cast( m_device ), reinterpret_cast( &memoryGetRemoteAddressInfo ), reinterpret_cast( &address ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryRemoteAddressNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryRemoteAddressNV" ); return address; } @@ -21386,7 +21538,7 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkGetPipelinePropertiesEXT( static_cast( m_device ), reinterpret_cast( &pipelineInfo ), reinterpret_cast( 
&pipelineProperties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelinePropertiesEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelinePropertiesEXT" ); return pipelineProperties; } @@ -21575,11 +21727,11 @@ namespace VULKAN_HPP_NAMESPACE static_cast( deferredOperation ), infos.size(), reinterpret_cast( infos.data() ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::buildMicromapsEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::buildMicromapsEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); return static_cast( result ); } @@ -21593,11 +21745,11 @@ namespace VULKAN_HPP_NAMESPACE static_cast( getDispatcher()->vkCopyMicromapEXT( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); return static_cast( result ); } @@ -21612,11 +21764,11 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkCopyMicromapToMemoryEXT( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapToMemoryEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::copyMicromapToMemoryEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); return static_cast( result ); } @@ -21631,11 +21783,11 @@ namespace VULKAN_HPP_NAMESPACE getDispatcher()->vkCopyMemoryToMicromapEXT( static_cast( m_device ), static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); - resultCheck( result, - VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToMicromapEXT", - { VULKAN_HPP_NAMESPACE::Result::eSuccess, - VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, - VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, + VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToMicromapEXT", + { VULKAN_HPP_NAMESPACE::Result::eSuccess, + VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, + VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); return static_cast( result ); } @@ -21659,7 +21811,7 @@ namespace VULKAN_HPP_NAMESPACE data.size() * sizeof( DataType ), reinterpret_cast( data.data() ), stride ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertiesEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING 
"::Device::writeMicromapsPropertiesEXT" ); return data; } @@ -21681,7 +21833,7 @@ namespace VULKAN_HPP_NAMESPACE sizeof( DataType ), reinterpret_cast( &data ), stride ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertyEXT" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::writeMicromapsPropertyEXT" ); return data; } @@ -22368,7 +22520,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( imageFormatProperties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getOpticalFlowImageFormatsNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getOpticalFlowImageFormatsNV" ); VULKAN_HPP_ASSERT( formatCount <= imageFormatProperties.size() ); if ( formatCount < imageFormatProperties.size() ) { @@ -22413,7 +22565,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( bindingPoint ), static_cast( view ), static_cast( layout ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::OpticalFlowSessionNV::bindImage" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::OpticalFlowSessionNV::bindImage" ); } VULKAN_HPP_INLINE void CommandBuffer::opticalFlowExecuteNV( VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV session, @@ -22519,6 +22671,15 @@ namespace VULKAN_HPP_NAMESPACE return structureChain; } + //=== VK_AMD_anti_lag === + + VULKAN_HPP_INLINE void Device::antiLagUpdateAMD( const VULKAN_HPP_NAMESPACE::AntiLagDataAMD & data ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkAntiLagUpdateAMD && "Function requires " ); + + getDispatcher()->vkAntiLagUpdateAMD( static_cast( m_device ), reinterpret_cast( &data ) ); + } + //=== VK_EXT_shader_object === VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE @@ -22572,7 +22733,7 @@ namespace VULKAN_HPP_NAMESPACE # endif } - return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::ShaderEXT( *this, *reinterpret_cast( &shader ), allocator ); + return VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::ShaderEXT( *this, *reinterpret_cast( &shader ), allocator, result ); } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector ShaderEXT::getBinaryData() const @@ -22593,7 +22754,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_device ), static_cast( m_shader ), &dataSize, reinterpret_cast( data.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ShaderEXT::getBinaryData" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::ShaderEXT::getBinaryData" ); VULKAN_HPP_ASSERT( dataSize <= data.size() ); if ( dataSize < data.size() ) { @@ -22621,6 +22782,125 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( shaders.data() ) ); } + //=== VK_KHR_pipeline_binary === + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE + VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::CreateReturnType>::Type + Device::createPipelineBinariesKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR const & createInfo, + VULKAN_HPP_NAMESPACE::Optional allocator ) const + { + std::vector pipelineBinaries; + VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR binaries; + VULKAN_HPP_NAMESPACE::Result result; + if ( createInfo.pKeysAndDataInfo ) + { + VULKAN_HPP_ASSERT( !createInfo.pipeline && !createInfo.pPipelineCreateInfo ); + pipelineBinaries.resize( createInfo.pKeysAndDataInfo->binaryCount ); + binaries.pipelineBinaryCount = 
createInfo.pKeysAndDataInfo->binaryCount; + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( getDispatcher()->vkCreatePipelineBinariesKHR( + static_cast( m_device ), + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + else + { + VULKAN_HPP_ASSERT( !createInfo.pipeline ^ !createInfo.pPipelineCreateInfo ); + result = static_cast( getDispatcher()->vkCreatePipelineBinariesKHR( + static_cast( m_device ), + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaries.resize( binaries.pipelineBinaryCount ); + binaries.pPipelineBinaries = pipelineBinaries.data(); + result = static_cast( getDispatcher()->vkCreatePipelineBinariesKHR( + static_cast( m_device ), + reinterpret_cast( &createInfo ), + reinterpret_cast( static_cast( allocator ) ), + reinterpret_cast( &binaries ) ) ); + } + } + + if ( ( result != VULKAN_HPP_NAMESPACE::Result::eSuccess ) && ( result != VULKAN_HPP_NAMESPACE::Result::eIncomplete ) && + ( result != VULKAN_HPP_NAMESPACE::Result::ePipelineBinaryMissingKHR ) ) + { +# if defined( VULKAN_HPP_RAII_NO_EXCEPTIONS ) + return VULKAN_HPP_UNEXPECTED( result ); +# else + VULKAN_HPP_NAMESPACE::detail::throwResultException( result, "Device::createPipelineBinariesKHR" ); +# endif + } + + std::vector pipelineBinariesRAII; + pipelineBinariesRAII.reserve( pipelineBinaries.size() ); + for ( auto & pipelineBinary : pipelineBinaries ) + { + pipelineBinariesRAII.emplace_back( *this, *reinterpret_cast( &pipelineBinary ), allocator, result ); + } + return pipelineBinariesRAII; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR + Device::getPipelineKeyKHR( Optional pipelineCreateInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkGetPipelineKeyKHR && "Function requires " ); + + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR pipelineKey; + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetPipelineKeyKHR( + static_cast( m_device ), + reinterpret_cast( static_cast( pipelineCreateInfo ) ), + reinterpret_cast( &pipelineKey ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getPipelineKeyKHR" ); + + return pipelineKey; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::pair> + Device::getPipelineBinaryDataKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR & info ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkGetPipelineBinaryDataKHR && "Function requires " ); + + std::pair> data_; + VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR & pipelineBinaryKey = data_.first; + std::vector & pipelineBinaryData = data_.second; + size_t pipelineBinaryDataSize; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkGetPipelineBinaryDataKHR( static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + nullptr ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + pipelineBinaryData.resize( pipelineBinaryDataSize ); + result = static_cast( + getDispatcher()->vkGetPipelineBinaryDataKHR( static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &pipelineBinaryKey ), + &pipelineBinaryDataSize, + reinterpret_cast( pipelineBinaryData.data() ) ) ); + } + + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING 
"::Device::getPipelineBinaryDataKHR" ); + + return data_; + } + + VULKAN_HPP_INLINE void + Device::releaseCapturedPipelineDataKHR( const VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR & info, + Optional allocator ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkReleaseCapturedPipelineDataKHR && "Function requires " ); + + getDispatcher()->vkReleaseCapturedPipelineDataKHR( + static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( static_cast( allocator ) ) ); + } + //=== VK_QCOM_tile_properties === VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Framebuffer::getTilePropertiesQCOM() const @@ -22676,7 +22956,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkSetLatencySleepModeNV( static_cast( m_device ), static_cast( m_swapchain ), reinterpret_cast( &sleepModeInfo ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::setLatencySleepModeNV" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::SwapchainKHR::setLatencySleepModeNV" ); } VULKAN_HPP_INLINE void SwapchainKHR::latencySleepNV( const VULKAN_HPP_NAMESPACE::LatencySleepInfoNV & sleepInfo ) const VULKAN_HPP_NOEXCEPT @@ -22696,15 +22976,20 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( &latencyMarkerInfo ) ); } - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV SwapchainKHR::getLatencyTimingsNV() const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector SwapchainKHR::getLatencyTimingsNV() const { VULKAN_HPP_ASSERT( getDispatcher()->vkGetLatencyTimingsNV && "Function requires " ); - VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV latencyMarkerInfo; + std::vector timings; + VULKAN_HPP_NAMESPACE::GetLatencyMarkerInfoNV latencyMarkerInfo; + getDispatcher()->vkGetLatencyTimingsNV( + static_cast( m_device ), static_cast( m_swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); + timings.resize( latencyMarkerInfo.timingCount ); + latencyMarkerInfo.pTimings = timings.data(); getDispatcher()->vkGetLatencyTimingsNV( static_cast( m_device ), static_cast( m_swapchain ), reinterpret_cast( &latencyMarkerInfo ) ); - return latencyMarkerInfo; + return timings; } VULKAN_HPP_INLINE void Queue::notifyOutOfBandNV( const VULKAN_HPP_NAMESPACE::OutOfBandQueueTypeInfoNV & queueTypeInfo ) const VULKAN_HPP_NOEXCEPT @@ -22736,7 +23021,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_physicalDevice ), &propertyCount, reinterpret_cast( properties.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCooperativeMatrixPropertiesKHR" ); VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); if ( propertyCount < properties.size() ) { @@ -22768,7 +23053,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ScreenBufferPropertiesQNX properties; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetScreenBufferPropertiesQNX( static_cast( m_device ), &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); return properties; } @@ -22784,7 +23069,7 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::ScreenBufferPropertiesQNX & properties = structureChain.template get(); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetScreenBufferPropertiesQNX( static_cast( m_device ), &buffer, reinterpret_cast( &properties ) ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getScreenBufferPropertiesQNX" ); return structureChain; } @@ -22822,7 +23107,7 @@ namespace VULKAN_HPP_NAMESPACE static_cast( m_physicalDevice ), &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) ); } } while ( result == VULKAN_HPP_NAMESPACE::Result::eIncomplete ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getCalibrateableTimeDomainsKHR" ); VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); if ( timeDomainCount < timeDomains.size() ) { @@ -22846,7 +23131,7 @@ namespace VULKAN_HPP_NAMESPACE reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsKHR" ); return data_; } @@ -22862,7 +23147,7 @@ namespace VULKAN_HPP_NAMESPACE uint64_t & maxDeviation = data_.second; VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkGetCalibratedTimestampsKHR( static_cast( m_device ), 1, reinterpret_cast( ×tampInfo ), ×tamp, &maxDeviation ) ); - resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampKHR" ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampKHR" ); return data_; } diff --git a/third_party/vulkan/vulkan_shared.hpp b/third_party/vulkan/vulkan_shared.hpp index 8b2697a..1bff517 100644 --- a/third_party/vulkan/vulkan_shared.hpp +++ b/third_party/vulkan/vulkan_shared.hpp @@ -52,6 +52,28 @@ namespace VULKAN_HPP_NAMESPACE { }; + template + struct HasPoolType : std::false_type + { + }; + + template + struct HasPoolType::deleter::PoolTypeExport() )> : std::true_type + { + }; + + template + struct GetPoolType + { + using type = NoDestructor; + }; + + template + struct GetPoolType::value>::type> + { + using type = typename SharedHandleTraits::deleter::PoolTypeExport; + }; + //===================================================================================================================== template @@ -257,12 +279,23 @@ namespace VULKAN_HPP_NAMESPACE public: SharedHandle() = default; - template ::value>::type> + template ::value && !HasPoolType::value>::type> explicit SharedHandle( HandleType handle, SharedHandle> parent, DeleterType deleter = DeleterType() ) VULKAN_HPP_NOEXCEPT : BaseType( handle, std::move( parent ), std::move( deleter ) ) { } + template ::value && HasPoolType::value>::type> + explicit SharedHandle( HandleType handle, + SharedHandle> parent, + SharedHandle::type> pool, + const Dispatcher & dispatch VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT + : BaseType( handle, std::move( parent ), DeleterType{ std::move( pool ), dispatch } ) + { + } + template ::value>::type> explicit SharedHandle( HandleType handle, DeleterType deleter = DeleterType() ) VULKAN_HPP_NOEXCEPT : BaseType( handle, std::move( deleter ) ) { @@ 
-390,6 +423,8 @@ namespace VULKAN_HPP_NAMESPACE public: using DestructorType = typename SharedHandleTraits::DestructorType; + using PoolTypeExport = PoolType; + template using ReturnType = decltype( std::declval().free( PoolType(), 0u, nullptr, Dispatcher() ) ); @@ -409,7 +444,7 @@ namespace VULKAN_HPP_NAMESPACE public: void destroy( DestructorType parent, HandleType handle ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( m_destroy && m_dispatch ); + VULKAN_HPP_ASSERT( m_destroy && m_dispatch && m_pool ); ( parent.*m_destroy )( m_pool.get(), 1u, &handle, *m_dispatch ); } @@ -925,6 +960,17 @@ namespace VULKAN_HPP_NAMESPACE using SharedShaderEXT = SharedHandle; + //=== VK_KHR_pipeline_binary === + template <> + class SharedHandleTraits + { + public: + using DestructorType = Device; + using deleter = ObjectDestroyShared; + }; + + using SharedPipelineBinaryKHR = SharedHandle; + enum class SwapchainOwns { no, diff --git a/third_party/vulkan/vulkan_static_assertions.hpp b/third_party/vulkan/vulkan_static_assertions.hpp index 693977c..1d0e054 100644 --- a/third_party/vulkan/vulkan_static_assertions.hpp +++ b/third_party/vulkan/vulkan_static_assertions.hpp @@ -105,6 +105,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Instance is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Instance is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::InstanceCreateInfo ) == sizeof( VkInstanceCreateInfo ), "struct and wrapper have different size!" ); @@ -121,6 +122,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "MemoryType is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevice ) == sizeof( VkPhysicalDevice ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, "PhysicalDevice is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PhysicalDevice is not nothrow_move_constructible!" ); @@ -160,6 +162,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Device is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Device is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceCreateInfo ) == sizeof( VkDeviceCreateInfo ), "struct and wrapper have different size!" ); @@ -184,6 +187,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Queue is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Queue is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SubmitInfo ) == sizeof( VkSubmitInfo ), "struct and wrapper have different size!" ); @@ -201,6 +205,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DeviceMemory is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DeviceMemory is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryRequirements ) == sizeof( VkMemoryRequirements ), "struct and wrapper have different size!" ); @@ -260,6 +265,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Fence is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Fence is not nothrow_move_constructible!" 
); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::FenceCreateInfo ) == sizeof( VkFenceCreateInfo ), "struct and wrapper have different size!" ); @@ -268,6 +274,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Semaphore is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Semaphore is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo ) == sizeof( VkSemaphoreCreateInfo ), "struct and wrapper have different size!" ); @@ -276,6 +283,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Event is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Event is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::EventCreateInfo ) == sizeof( VkEventCreateInfo ), "struct and wrapper have different size!" ); @@ -284,6 +292,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "QueryPool is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "QueryPool is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo ) == sizeof( VkQueryPoolCreateInfo ), "struct and wrapper have different size!" ); @@ -292,6 +301,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Buffer is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Buffer is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BufferCreateInfo ) == sizeof( VkBufferCreateInfo ), "struct and wrapper have different size!" ); @@ -300,6 +310,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "BufferView is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "BufferView is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BufferViewCreateInfo ) == sizeof( VkBufferViewCreateInfo ), "struct and wrapper have different size!" ); @@ -308,6 +319,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Image is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Image is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageCreateInfo ) == sizeof( VkImageCreateInfo ), "struct and wrapper have different size!" ); @@ -332,6 +344,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ImageView is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ImageView is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageViewCreateInfo ) == sizeof( VkImageViewCreateInfo ), "struct and wrapper have different size!" ); @@ -340,6 +353,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ShaderModule is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ShaderModule is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo ) == sizeof( VkShaderModuleCreateInfo ), @@ -349,6 +363,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PipelineCache is not copy_constructible!" 
); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PipelineCache is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo ) == sizeof( VkPipelineCacheCreateInfo ), @@ -370,6 +385,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Pipeline is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Pipeline is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineColorBlendAttachmentState ) == sizeof( VkPipelineColorBlendAttachmentState ), @@ -477,6 +493,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "Viewport is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineLayout ) == sizeof( VkPipelineLayout ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, "PipelineLayout is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PipelineLayout is not nothrow_move_constructible!" ); @@ -492,6 +509,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Sampler is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Sampler is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SamplerCreateInfo ) == sizeof( VkSamplerCreateInfo ), "struct and wrapper have different size!" ); @@ -515,6 +533,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorPool is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorPool is not nothrow_move_constructible!" ); @@ -530,6 +549,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorSet is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorSet is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo ) == sizeof( VkDescriptorSetAllocateInfo ), @@ -539,6 +559,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorSetLayout is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorSetLayout is not nothrow_move_constructible!" ); @@ -571,6 +592,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Framebuffer is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "Framebuffer is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::FramebufferCreateInfo ) == sizeof( VkFramebufferCreateInfo ), @@ -580,6 +602,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "RenderPass is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "RenderPass is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderPassCreateInfo ) == sizeof( VkRenderPassCreateInfo ), "struct and wrapper have different size!" ); @@ -598,6 +621,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CommandPool is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CommandPool is not nothrow_move_constructible!" 
); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo ) == sizeof( VkCommandPoolCreateInfo ), @@ -607,6 +631,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CommandBuffer is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CommandBuffer is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo ) == sizeof( VkCommandBufferAllocateInfo ), @@ -983,11 +1008,15 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "SamplerYcbcrConversion is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "SamplerYcbcrConversion is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate ) == sizeof( VkDescriptorUpdateTemplate ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, + "DescriptorUpdateTemplate is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DescriptorUpdateTemplate is not nothrow_move_constructible!" ); @@ -1516,6 +1545,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PrivateDataSlot is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PrivateDataSlot is not nothrow_move_constructible!" ); @@ -1784,6 +1814,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "SurfaceKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "SurfaceKHR is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR ) == sizeof( VkSurfaceCapabilitiesKHR ), @@ -1806,6 +1837,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "SwapchainKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "SwapchainKHR is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PresentInfoKHR ) == sizeof( VkPresentInfoKHR ), "struct and wrapper have different size!" ); @@ -1852,6 +1884,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DisplayKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DisplayKHR is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR ) == sizeof( VkDisplayModeCreateInfoKHR ), @@ -1861,6 +1894,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DisplayModeKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DisplayModeKHR is not nothrow_move_constructible!" ); @@ -1961,6 +1995,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "DebugReportCallbackEXT is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DebugReportCallbackEXT is not nothrow_move_constructible!" ); @@ -2003,11 +2039,14 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "VideoSessionKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "VideoSessionKHR is not nothrow_move_constructible!" 
); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR ) == sizeof( VkVideoSessionParametersKHR ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, + "VideoSessionParametersKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "VideoSessionParametersKHR is not nothrow_move_constructible!" ); @@ -2184,9 +2223,11 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CuModuleNVX is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CuModuleNVX is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CuFunctionNVX ) == sizeof( VkCuFunctionNVX ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, "CuFunctionNVX is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CuFunctionNVX is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CuModuleCreateInfoNVX ) == sizeof( VkCuModuleCreateInfoNVX ), @@ -3158,6 +3199,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "DebugUtilsMessengerEXT is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DebugUtilsMessengerEXT is not nothrow_move_constructible!" ); @@ -3459,6 +3502,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "AccelerationStructureKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "AccelerationStructureKHR is not nothrow_move_constructible!" ); @@ -3671,6 +3716,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ValidationCacheEXT is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ValidationCacheEXT is not nothrow_move_constructible!" ); @@ -3802,6 +3848,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "AccelerationStructureNV is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "AccelerationStructureNV is not nothrow_move_constructible!" ); @@ -4011,16 +4059,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceComputeShaderDerivativesFeaturesNV is not nothrow_move_constructible!" ); - //=== VK_NV_mesh_shader === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMeshShaderFeaturesNV ) == sizeof( VkPhysicalDeviceMeshShaderFeaturesNV ), @@ -4147,6 +4185,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PerformanceConfigurationINTEL is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "PerformanceConfigurationINTEL is not nothrow_move_constructible!" ); @@ -4540,6 +4580,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DeferredOperationKHR is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "DeferredOperationKHR is not nothrow_move_constructible!" ); @@ -4819,6 +4860,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "IndirectCommandsLayoutNV is not copy_constructible!" 
); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "IndirectCommandsLayoutNV is not nothrow_move_constructible!" ); @@ -5120,9 +5163,11 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CudaModuleNV is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CudaModuleNV is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CudaFunctionNV ) == sizeof( VkCudaFunctionNV ), "handle and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible::value, "CudaFunctionNV is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "CudaFunctionNV is not nothrow_move_constructible!" ); @@ -5786,6 +5831,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "BufferCollectionFUCHSIA is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "BufferCollectionFUCHSIA is not nothrow_move_constructible!" ); @@ -6102,6 +6149,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "MicromapEXT is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "MicromapEXT is not nothrow_move_constructible!" ); VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceOpacityMicromapFeaturesEXT ) == sizeof( VkPhysicalDeviceOpacityMicromapFeaturesEXT ), @@ -6728,6 +6776,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "OpticalFlowSessionNV is not copy_constructible!" ); VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "OpticalFlowSessionNV is not nothrow_move_constructible!" ); @@ -6847,6 +6896,25 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "BufferUsageFlags2CreateInfoKHR is not nothrow_move_constructible!" ); +//=== VK_AMD_anti_lag === + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceAntiLagFeaturesAMD ) == sizeof( VkPhysicalDeviceAntiLagFeaturesAMD ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceAntiLagFeaturesAMD is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::AntiLagDataAMD ) == sizeof( VkAntiLagDataAMD ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "AntiLagDataAMD is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD ) == sizeof( VkAntiLagPresentationInfoAMD ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "AntiLagPresentationInfoAMD is not nothrow_move_constructible!" ); + //=== VK_KHR_ray_tracing_position_fetch === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPositionFetchFeaturesKHR ) == @@ -6860,6 +6928,7 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "ShaderEXT is not copy_constructible!" 
);
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::ShaderEXT>::value, "ShaderEXT is not nothrow_move_constructible!" );
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderObjectFeaturesEXT ) == sizeof( VkPhysicalDeviceShaderObjectFeaturesEXT ),
@@ -6881,6 +6950,89 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::ShaderCreateInfoEXT>::value,
                           "ShaderCreateInfoEXT is not nothrow_move_constructible!" );
 
+//=== VK_KHR_pipeline_binary ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR ) == sizeof( VkPhysicalDevicePipelineBinaryFeaturesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR>::value,
+                          "PhysicalDevicePipelineBinaryFeaturesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR ) == sizeof( VkPhysicalDevicePipelineBinaryPropertiesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR>::value,
+                          "PhysicalDevicePipelineBinaryPropertiesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR ) ==
+                            sizeof( VkDevicePipelineBinaryInternalCacheControlKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR>::value,
+                          "DevicePipelineBinaryInternalCacheControlKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR ) == sizeof( VkPipelineBinaryKHR ), "handle and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_copy_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryKHR>::value, "PipelineBinaryKHR is not copy_constructible!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryKHR>::value,
+                          "PipelineBinaryKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR ) == sizeof( VkPipelineBinaryKeyKHR ), "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR>::value,
+                          "PipelineBinaryKeyKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR ) == sizeof( VkPipelineBinaryDataKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR>::value,
+                          "PipelineBinaryDataKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR ) == sizeof( VkPipelineBinaryKeysAndDataKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR>::value,
+                          "PipelineBinaryKeysAndDataKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR ) == sizeof( VkPipelineBinaryCreateInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryCreateInfoKHR>::value,
+                          "PipelineBinaryCreateInfoKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR ) == sizeof( VkPipelineBinaryInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryInfoKHR>::value,
+                          "PipelineBinaryInfoKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR ) == sizeof( VkReleaseCapturedPipelineDataInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR>::value,
+                          "ReleaseCapturedPipelineDataInfoKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR ) == sizeof( VkPipelineBinaryDataInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryDataInfoKHR>::value,
+                          "PipelineBinaryDataInfoKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR ) == sizeof( VkPipelineCreateInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR>::value,
+                          "PipelineCreateInfoKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR ) == sizeof( VkPipelineBinaryHandlesInfoKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR>::value, "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PipelineBinaryHandlesInfoKHR>::value,
+                          "PipelineBinaryHandlesInfoKHR is not nothrow_move_constructible!" );
+
 //=== VK_QCOM_tile_properties ===
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceTilePropertiesFeaturesQCOM ) == sizeof( VkPhysicalDeviceTilePropertiesFeaturesQCOM ),
@@ -6979,6 +7131,24 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::MutableDescriptorTypeCreateInfoEXT>::value,
                           "MutableDescriptorTypeCreateInfoEXT is not nothrow_move_constructible!" );
 
+//=== VK_EXT_legacy_vertex_attributes ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT ) ==
+                            sizeof( VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT>::value,
+                          "PhysicalDeviceLegacyVertexAttributesFeaturesEXT is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT ) ==
+                            sizeof( VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT>::value,
+                          "PhysicalDeviceLegacyVertexAttributesPropertiesEXT is not nothrow_move_constructible!" );
+
 //=== VK_EXT_layer_settings ===
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::LayerSettingsCreateInfoEXT ) == sizeof( VkLayerSettingsCreateInfoEXT ),
@@ -7126,6 +7296,24 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM>::value,
                           "MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM is not nothrow_move_constructible!" );
 
+//=== VK_KHR_compute_shader_derivatives ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesKHR ) ==
+                            sizeof( VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesFeaturesKHR>::value,
+                          "PhysicalDeviceComputeShaderDerivativesFeaturesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesPropertiesKHR ) ==
+                            sizeof( VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesPropertiesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceComputeShaderDerivativesPropertiesKHR>::value,
+                          "PhysicalDeviceComputeShaderDerivativesPropertiesKHR is not nothrow_move_constructible!" );
+
 //=== VK_KHR_video_decode_av1 ===
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::VideoDecodeAV1ProfileInfoKHR ) == sizeof( VkVideoDecodeAV1ProfileInfoKHR ),
@@ -7483,6 +7671,64 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceRawAccessChainsFeaturesNV>::value,
                           "PhysicalDeviceRawAccessChainsFeaturesNV is not nothrow_move_constructible!" );
 
+//=== VK_KHR_shader_relaxed_extended_instruction ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR ) ==
+                            sizeof( VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR>::value,
+                          "PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR is not nothrow_move_constructible!" );
+
+//=== VK_NV_command_buffer_inheritance ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV ) ==
+                            sizeof( VkPhysicalDeviceCommandBufferInheritanceFeaturesNV ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceCommandBufferInheritanceFeaturesNV>::value,
+                          "PhysicalDeviceCommandBufferInheritanceFeaturesNV is not nothrow_move_constructible!" );
+
+//=== VK_KHR_maintenance7 ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR ) == sizeof( VkPhysicalDeviceMaintenance7FeaturesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR>::value,
+                          "PhysicalDeviceMaintenance7FeaturesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR ) == sizeof( VkPhysicalDeviceMaintenance7PropertiesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR>::value,
+                          "PhysicalDeviceMaintenance7PropertiesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesListKHR ) == sizeof( VkPhysicalDeviceLayeredApiPropertiesListKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesListKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesListKHR>::value,
+                          "PhysicalDeviceLayeredApiPropertiesListKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR ) == sizeof( VkPhysicalDeviceLayeredApiPropertiesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR>::value,
+                          "PhysicalDeviceLayeredApiPropertiesKHR is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR ) ==
+                            sizeof( VkPhysicalDeviceLayeredApiVulkanPropertiesKHR ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR>::value,
+                          "PhysicalDeviceLayeredApiVulkanPropertiesKHR is not nothrow_move_constructible!" );
+
 //=== VK_NV_shader_atomic_float16_vector ===
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV ) ==
@@ -7493,6 +7739,16 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV>::value,
                           "PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV is not nothrow_move_constructible!" );
 
+//=== VK_EXT_shader_replicated_composites ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT ) ==
+                            sizeof( VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT>::value,
+                          "PhysicalDeviceShaderReplicatedCompositesFeaturesEXT is not nothrow_move_constructible!" );
+
 //=== VK_NV_ray_tracing_validation ===
 
 VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingValidationFeaturesNV ) ==
@@ -7503,4 +7759,29 @@
 VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingValidationFeaturesNV>::value,
                           "PhysicalDeviceRayTracingValidationFeaturesNV is not nothrow_move_constructible!" );
 
+//=== VK_MESA_image_alignment_control ===
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA ) ==
+                            sizeof( VkPhysicalDeviceImageAlignmentControlFeaturesMESA ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA>::value,
+                          "PhysicalDeviceImageAlignmentControlFeaturesMESA is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA ) ==
+                            sizeof( VkPhysicalDeviceImageAlignmentControlPropertiesMESA ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA>::value,
+                          "PhysicalDeviceImageAlignmentControlPropertiesMESA is not nothrow_move_constructible!" );
+
+VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA ) == sizeof( VkImageAlignmentControlCreateInfoMESA ),
+                          "struct and wrapper have different size!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout<VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA>::value,
+                          "struct wrapper is not a standard layout!" );
+VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible<VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA>::value,
+                          "ImageAlignmentControlCreateInfoMESA is not nothrow_move_constructible!" );
+
 #endif
diff --git a/third_party/vulkan/vulkan_structs.hpp b/third_party/vulkan/vulkan_structs.hpp
index 45d9c2b..5f6391c 100644
--- a/third_party/vulkan/vulkan_structs.hpp
+++ b/third_party/vulkan/vulkan_structs.hpp
@@ -8,6 +8,9 @@
 #ifndef VULKAN_STRUCTS_HPP
 #define VULKAN_STRUCTS_HPP
 
+// include-what-you-use: make sure, vulkan.hpp is used by code-completers
+// IWYU pragma: private; include "vulkan.hpp"
+
 #include <cstring>  // strcmp
 
 namespace VULKAN_HPP_NAMESPACE
@@ -24,12 +27,12 @@
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR
       AabbPositionsKHR( float minX_ = {}, float minY_ = {}, float minZ_ = {}, float maxX_ = {}, float maxY_ = {}, float maxZ_ = {} ) VULKAN_HPP_NOEXCEPT
-      : minX( minX_ )
-      , minY( minY_ )
-      , minZ( minZ_ )
-      , maxX( maxX_ )
-      , maxY( maxY_ )
-      , maxZ( maxZ_ )
+      : minX{ minX_ }
+      , minY{ minY_ }
+      , minZ{ minZ_ }
+      , maxX{ maxX_ }
+      , maxY{ maxY_ }
+      , maxZ{ maxZ_ }
     {
     }
 
@@ -195,14 +198,14 @@
                                                                 VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData_ = {},
                                                                 VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData_ = {},
                                                                 const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , vertexFormat( vertexFormat_ )
-      , vertexData( vertexData_ )
-      , vertexStride( vertexStride_ )
-      , maxVertex( maxVertex_ )
-      , indexType( indexType_ )
-      , indexData( indexData_ )
-      , transformData( transformData_ )
+      : pNext{ pNext_ }
+      , vertexFormat{ vertexFormat_ }
+      , vertexData{ vertexData_ }
+      , vertexStride{ vertexStride_ }
+      , maxVertex{ maxVertex_ }
+      , indexType{ indexType_ }
+      , indexData{ indexData_ }
+      , transformData{ transformData_ }
     {
     }
 
@@ -336,9 +339,9 @@
     VULKAN_HPP_CONSTEXPR_14 AccelerationStructureGeometryAabbsDataKHR( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data_ = {},
                                                                        VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {},
                                                                        const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , data( data_ )
-      , stride( stride_ )
+      : pNext{ pNext_ }
+      , data{ data_ }
+      , stride{ stride_ }
{ } @@ -428,9 +431,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 AccelerationStructureGeometryInstancesDataKHR( VULKAN_HPP_NAMESPACE::Bool32 arrayOfPointers_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , arrayOfPointers( arrayOfPointers_ ) - , data( data_ ) + : pNext{ pNext_ } + , arrayOfPointers{ arrayOfPointers_ } + , data{ data_ } { } @@ -586,10 +589,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR geometry_ = {}, VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , geometryType( geometryType_ ) - , geometry( geometry_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , geometryType{ geometryType_ } + , geometry{ geometry_ } + , flags{ flags_ } { } @@ -738,16 +741,16 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR * const * ppGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , flags( flags_ ) - , mode( mode_ ) - , srcAccelerationStructure( srcAccelerationStructure_ ) - , dstAccelerationStructure( dstAccelerationStructure_ ) - , geometryCount( geometryCount_ ) - , pGeometries( pGeometries_ ) - , ppGeometries( ppGeometries_ ) - , scratchData( scratchData_ ) + : pNext{ pNext_ } + , type{ type_ } + , flags{ flags_ } + , mode{ mode_ } + , srcAccelerationStructure{ srcAccelerationStructure_ } + , dstAccelerationStructure{ dstAccelerationStructure_ } + , geometryCount{ geometryCount_ } + , pGeometries{ pGeometries_ } + , ppGeometries{ ppGeometries_ } + , scratchData{ scratchData_ } { } @@ -954,10 +957,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t primitiveOffset_ = {}, uint32_t firstVertex_ = {}, uint32_t transformOffset_ = {} ) VULKAN_HPP_NOEXCEPT - : primitiveCount( primitiveCount_ ) - , primitiveOffset( primitiveOffset_ ) - , firstVertex( firstVertex_ ) - , transformOffset( transformOffset_ ) + : primitiveCount{ primitiveCount_ } + , primitiveOffset{ primitiveOffset_ } + , firstVertex{ firstVertex_ } + , transformOffset{ transformOffset_ } { } @@ -1063,10 +1066,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructureSize( accelerationStructureSize_ ) - , updateScratchSize( updateScratchSize_ ) - , buildScratchSize( buildScratchSize_ ) + : pNext{ pNext_ } + , accelerationStructureSize{ accelerationStructureSize_ } + , updateScratchSize{ updateScratchSize_ } + , buildScratchSize{ buildScratchSize_ } { } @@ -1156,9 +1159,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR AccelerationStructureCaptureDescriptorDataInfoEXT( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructureNV_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructure( accelerationStructure_ ) - , accelerationStructureNV( accelerationStructureNV_ ) + : pNext{ pNext_ } + , accelerationStructure{ accelerationStructure_ } + , accelerationStructureNV{ accelerationStructureNV_ } { } @@ -1275,13 +1278,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = 
VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , createFlags( createFlags_ ) - , buffer( buffer_ ) - , offset( offset_ ) - , size( size_ ) - , type( type_ ) - , deviceAddress( deviceAddress_ ) + : pNext{ pNext_ } + , createFlags{ createFlags_ } + , buffer{ buffer_ } + , offset{ offset_ } + , size{ size_ } + , type{ type_ } + , deviceAddress{ deviceAddress_ } { } @@ -1431,18 +1434,18 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Buffer transformData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize transformOffset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , vertexData( vertexData_ ) - , vertexOffset( vertexOffset_ ) - , vertexCount( vertexCount_ ) - , vertexStride( vertexStride_ ) - , vertexFormat( vertexFormat_ ) - , indexData( indexData_ ) - , indexOffset( indexOffset_ ) - , indexCount( indexCount_ ) - , indexType( indexType_ ) - , transformData( transformData_ ) - , transformOffset( transformOffset_ ) + : pNext{ pNext_ } + , vertexData{ vertexData_ } + , vertexOffset{ vertexOffset_ } + , vertexCount{ vertexCount_ } + , vertexStride{ vertexStride_ } + , vertexFormat{ vertexFormat_ } + , indexData{ indexData_ } + , indexOffset{ indexOffset_ } + , indexCount{ indexCount_ } + , indexType{ indexType_ } + , transformData{ transformData_ } + , transformOffset{ transformOffset_ } { } @@ -1637,11 +1640,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , aabbData( aabbData_ ) - , numAABBs( numAABBs_ ) - , stride( stride_ ) - , offset( offset_ ) + : pNext{ pNext_ } + , aabbData{ aabbData_ } + , numAABBs{ numAABBs_ } + , stride{ stride_ } + , offset{ offset_ } { } @@ -1758,8 +1761,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR GeometryDataNV( VULKAN_HPP_NAMESPACE::GeometryTrianglesNV triangles_ = {}, VULKAN_HPP_NAMESPACE::GeometryAABBNV aabbs_ = {} ) VULKAN_HPP_NOEXCEPT - : triangles( triangles_ ) - , aabbs( aabbs_ ) + : triangles{ triangles_ } + , aabbs{ aabbs_ } { } @@ -1847,10 +1850,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::GeometryDataNV geometry_ = {}, VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , geometryType( geometryType_ ) - , geometry( geometry_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , geometryType{ geometryType_ } + , geometry{ geometry_ } + , flags{ flags_ } { } @@ -1965,12 +1968,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t geometryCount_ = {}, const VULKAN_HPP_NAMESPACE::GeometryNV * pGeometries_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , flags( flags_ ) - , instanceCount( instanceCount_ ) - , geometryCount( geometryCount_ ) - , pGeometries( pGeometries_ ) + : pNext{ pNext_ } + , type{ type_ } + , flags{ flags_ } + , instanceCount{ instanceCount_ } + , geometryCount{ geometryCount_ } + , pGeometries{ pGeometries_ } { } @@ -2128,9 +2131,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoNV( VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV info_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , compactedSize( compactedSize_ ) - , info( 
info_ )
+      : pNext{ pNext_ }
+      , compactedSize{ compactedSize_ }
+      , info{ info_ }
     {
     }
 
@@ -2236,8 +2239,8 @@
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR AccelerationStructureDeviceAddressInfoKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ = {},
                                                                     const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , accelerationStructure( accelerationStructure_ )
+      : pNext{ pNext_ }
+      , accelerationStructure{ accelerationStructure_ }
     {
     }
 
@@ -2334,8 +2337,8 @@
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR_14 AccelerationStructureGeometryMotionTrianglesDataNV( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData_ = {},
                                                                                 const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , vertexData( vertexData_ )
+      : pNext{ pNext_ }
+      , vertexData{ vertexData_ }
     {
     }
 
@@ -2411,7 +2414,7 @@
     using NativeType = VkTransformMatrixKHR;
 
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
-    VULKAN_HPP_CONSTEXPR_14 TransformMatrixKHR( std::array<std::array<float, 4>, 3> const & matrix_ = {} ) VULKAN_HPP_NOEXCEPT : matrix( matrix_ ) {}
+    VULKAN_HPP_CONSTEXPR_14 TransformMatrixKHR( std::array<std::array<float, 4>, 3> const & matrix_ = {} ) VULKAN_HPP_NOEXCEPT : matrix{ matrix_ } {}
 
     VULKAN_HPP_CONSTEXPR_14 TransformMatrixKHR( TransformMatrixKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
 
@@ -2491,12 +2494,12 @@
                                                          uint32_t instanceShaderBindingTableRecordOffset_ = {},
                                                          VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR flags_ = {},
                                                          uint64_t accelerationStructureReference_ = {} ) VULKAN_HPP_NOEXCEPT
-      : transform( transform_ )
-      , instanceCustomIndex( instanceCustomIndex_ )
-      , mask( mask_ )
-      , instanceShaderBindingTableRecordOffset( instanceShaderBindingTableRecordOffset_ )
-      , flags( flags_ )
-      , accelerationStructureReference( accelerationStructureReference_ )
+      : transform{ transform_ }
+      , instanceCustomIndex{ instanceCustomIndex_ }
+      , mask{ mask_ }
+      , instanceShaderBindingTableRecordOffset{ instanceShaderBindingTableRecordOffset_ }
+      , flags{ flags_ }
+      , accelerationStructureReference{ accelerationStructureReference_ }
     {
     }
 
@@ -2625,13 +2628,13 @@
                                                                uint32_t instanceShaderBindingTableRecordOffset_ = {},
                                                                VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR flags_ = {},
                                                                uint64_t accelerationStructureReference_ = {} ) VULKAN_HPP_NOEXCEPT
-      : transformT0( transformT0_ )
-      , transformT1( transformT1_ )
-      , instanceCustomIndex( instanceCustomIndex_ )
-      , mask( mask_ )
-      , instanceShaderBindingTableRecordOffset( instanceShaderBindingTableRecordOffset_ )
-      , flags( flags_ )
-      , accelerationStructureReference( accelerationStructureReference_ )
+      : transformT0{ transformT0_ }
+      , transformT1{ transformT1_ }
+      , instanceCustomIndex{ instanceCustomIndex_ }
+      , mask{ mask_ }
+      , instanceShaderBindingTableRecordOffset{ instanceShaderBindingTableRecordOffset_ }
+      , flags{ flags_ }
+      , accelerationStructureReference{ accelerationStructureReference_ }
     {
     }
 
@@ -2770,9 +2773,9 @@
         VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV::eObject,
         VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {},
         const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , type( type_ )
-      , accelerationStructure( accelerationStructure_ )
+      : pNext{ pNext_ }
+      , type{ type_ }
+      , 
accelerationStructure{ accelerationStructure_ } { } @@ -2882,9 +2885,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR AccelerationStructureMotionInfoNV( uint32_t maxInstances_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureMotionInfoFlagsNV flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxInstances( maxInstances_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , maxInstances{ maxInstances_ } + , flags{ flags_ } { } @@ -3002,22 +3005,22 @@ namespace VULKAN_HPP_NAMESPACE float tx_ = {}, float ty_ = {}, float tz_ = {} ) VULKAN_HPP_NOEXCEPT - : sx( sx_ ) - , a( a_ ) - , b( b_ ) - , pvx( pvx_ ) - , sy( sy_ ) - , c( c_ ) - , pvy( pvy_ ) - , sz( sz_ ) - , pvz( pvz_ ) - , qx( qx_ ) - , qy( qy_ ) - , qz( qz_ ) - , qw( qw_ ) - , tx( tx_ ) - , ty( ty_ ) - , tz( tz_ ) + : sx{ sx_ } + , a{ a_ } + , b{ b_ } + , pvx{ pvx_ } + , sy{ sy_ } + , c{ c_ } + , pvy{ pvy_ } + , sz{ sz_ } + , pvz{ pvz_ } + , qx{ qx_ } + , qy{ qy_ } + , qz{ qz_ } + , qw{ qw_ } + , tx{ tx_ } + , ty{ ty_ } + , tz{ tz_ } { } @@ -3220,13 +3223,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t instanceShaderBindingTableRecordOffset_ = {}, VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR flags_ = {}, uint64_t accelerationStructureReference_ = {} ) VULKAN_HPP_NOEXCEPT - : transformT0( transformT0_ ) - , transformT1( transformT1_ ) - , instanceCustomIndex( instanceCustomIndex_ ) - , mask( mask_ ) - , instanceShaderBindingTableRecordOffset( instanceShaderBindingTableRecordOffset_ ) - , flags( flags_ ) - , accelerationStructureReference( accelerationStructureReference_ ) + : transformT0{ transformT0_ } + , transformT1{ transformT1_ } + , instanceCustomIndex{ instanceCustomIndex_ } + , mask{ mask_ } + , instanceShaderBindingTableRecordOffset{ instanceShaderBindingTableRecordOffset_ } + , flags{ flags_ } + , accelerationStructureReference{ accelerationStructureReference_ } { } @@ -3426,9 +3429,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AccelerationStructureMotionInstanceTypeNV type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureMotionInstanceTypeNV::eStatic, VULKAN_HPP_NAMESPACE::AccelerationStructureMotionInstanceFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureMotionInstanceDataNV data_ = {} ) VULKAN_HPP_NOEXCEPT - : type( type_ ) - , flags( flags_ ) - , data( data_ ) + : type{ type_ } + , flags{ flags_ } + , data{ data_ } { } @@ -3507,9 +3510,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MicromapUsageEXT( uint32_t count_ = {}, uint32_t subdivisionLevel_ = {}, uint32_t format_ = {} ) VULKAN_HPP_NOEXCEPT - : count( count_ ) - , subdivisionLevel( subdivisionLevel_ ) - , format( format_ ) + : count{ count_ } + , subdivisionLevel{ subdivisionLevel_ } + , format{ format_ } { } @@ -3619,23 +3622,23 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::MicromapUsageEXT * const * ppUsageCounts_ = {}, VULKAN_HPP_NAMESPACE::MicromapEXT micromap_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displacementBiasAndScaleFormat( displacementBiasAndScaleFormat_ ) - , displacementVectorFormat( displacementVectorFormat_ ) - , displacementBiasAndScaleBuffer( displacementBiasAndScaleBuffer_ ) - , displacementBiasAndScaleStride( displacementBiasAndScaleStride_ ) - , displacementVectorBuffer( displacementVectorBuffer_ ) - , displacementVectorStride( displacementVectorStride_ ) - , displacedMicromapPrimitiveFlags( displacedMicromapPrimitiveFlags_ ) - , displacedMicromapPrimitiveFlagsStride( 
displacedMicromapPrimitiveFlagsStride_ ) - , indexType( indexType_ ) - , indexBuffer( indexBuffer_ ) - , indexStride( indexStride_ ) - , baseTriangle( baseTriangle_ ) - , usageCountsCount( usageCountsCount_ ) - , pUsageCounts( pUsageCounts_ ) - , ppUsageCounts( ppUsageCounts_ ) - , micromap( micromap_ ) + : pNext{ pNext_ } + , displacementBiasAndScaleFormat{ displacementBiasAndScaleFormat_ } + , displacementVectorFormat{ displacementVectorFormat_ } + , displacementBiasAndScaleBuffer{ displacementBiasAndScaleBuffer_ } + , displacementBiasAndScaleStride{ displacementBiasAndScaleStride_ } + , displacementVectorBuffer{ displacementVectorBuffer_ } + , displacementVectorStride{ displacementVectorStride_ } + , displacedMicromapPrimitiveFlags{ displacedMicromapPrimitiveFlags_ } + , displacedMicromapPrimitiveFlagsStride{ displacedMicromapPrimitiveFlagsStride_ } + , indexType{ indexType_ } + , indexBuffer{ indexBuffer_ } + , indexStride{ indexStride_ } + , baseTriangle{ baseTriangle_ } + , usageCountsCount{ usageCountsCount_ } + , pUsageCounts{ pUsageCounts_ } + , ppUsageCounts{ ppUsageCounts_ } + , micromap{ micromap_ } { } @@ -3946,15 +3949,15 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::MicromapUsageEXT * const * ppUsageCounts_ = {}, VULKAN_HPP_NAMESPACE::MicromapEXT micromap_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , indexType( indexType_ ) - , indexBuffer( indexBuffer_ ) - , indexStride( indexStride_ ) - , baseTriangle( baseTriangle_ ) - , usageCountsCount( usageCountsCount_ ) - , pUsageCounts( pUsageCounts_ ) - , ppUsageCounts( ppUsageCounts_ ) - , micromap( micromap_ ) + : pNext{ pNext_ } + , indexType{ indexType_ } + , indexBuffer{ indexBuffer_ } + , indexStride{ indexStride_ } + , baseTriangle{ baseTriangle_ } + , usageCountsCount{ usageCountsCount_ } + , pUsageCounts{ pUsageCounts_ } + , ppUsageCounts{ ppUsageCounts_ } + , micromap{ micromap_ } { } @@ -4147,8 +4150,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR AccelerationStructureVersionInfoKHR( const uint8_t * pVersionData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pVersionData( pVersionData_ ) + : pNext{ pNext_ } + , pVersionData{ pVersionData_ } { } @@ -4248,12 +4251,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Fence fence_ = {}, uint32_t deviceMask_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchain( swapchain_ ) - , timeout( timeout_ ) - , semaphore( semaphore_ ) - , fence( fence_ ) - , deviceMask( deviceMask_ ) + : pNext{ pNext_ } + , swapchain{ swapchain_ } + , timeout{ timeout_ } + , semaphore{ semaphore_ } + , fence{ fence_ } + , deviceMask{ deviceMask_ } { } @@ -4385,9 +4388,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR AcquireProfilingLockInfoKHR( VULKAN_HPP_NAMESPACE::AcquireProfilingLockFlagsKHR flags_ = {}, uint64_t timeout_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , timeout( timeout_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , timeout{ timeout_ } { } @@ -4491,12 +4494,12 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkFreeFunction pfnFree_ = {}, PFN_vkInternalAllocationNotification pfnInternalAllocation_ = {}, PFN_vkInternalFreeNotification pfnInternalFree_ = {} ) VULKAN_HPP_NOEXCEPT - : pUserData( pUserData_ ) - , pfnAllocation( pfnAllocation_ ) - , pfnReallocation( pfnReallocation_ ) - , pfnFree( pfnFree_ ) - , pfnInternalAllocation( pfnInternalAllocation_ 
) - , pfnInternalFree( pfnInternalFree_ ) + : pUserData{ pUserData_ } + , pfnAllocation{ pfnAllocation_ } + , pfnReallocation{ pfnReallocation_ } + , pfnFree{ pfnFree_ } + , pfnInternalAllocation{ pfnInternalAllocation_ } + , pfnInternalFree{ pfnInternalFree_ } { } @@ -4614,9 +4617,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR AmigoProfilingSubmitInfoSEC( uint64_t firstDrawTimestamp_ = {}, uint64_t swapBufferTimestamp_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , firstDrawTimestamp( firstDrawTimestamp_ ) - , swapBufferTimestamp( swapBufferTimestamp_ ) + : pNext{ pNext_ } + , firstDrawTimestamp{ firstDrawTimestamp_ } + , swapBufferTimestamp{ swapBufferTimestamp_ } { } @@ -4719,10 +4722,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ComponentSwizzle g_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity, VULKAN_HPP_NAMESPACE::ComponentSwizzle b_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity, VULKAN_HPP_NAMESPACE::ComponentSwizzle a_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity ) VULKAN_HPP_NOEXCEPT - : r( r_ ) - , g( g_ ) - , b( b_ ) - , a( a_ ) + : r{ r_ } + , g{ g_ } + , b{ b_ } + , a{ a_ } { } @@ -4834,15 +4837,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , externalFormat( externalFormat_ ) - , formatFeatures( formatFeatures_ ) - , samplerYcbcrConversionComponents( samplerYcbcrConversionComponents_ ) - , suggestedYcbcrModel( suggestedYcbcrModel_ ) - , suggestedYcbcrRange( suggestedYcbcrRange_ ) - , suggestedXChromaOffset( suggestedXChromaOffset_ ) - , suggestedYChromaOffset( suggestedYChromaOffset_ ) + : pNext{ pNext_ } + , format{ format_ } + , externalFormat{ externalFormat_ } + , formatFeatures{ formatFeatures_ } + , samplerYcbcrConversionComponents{ samplerYcbcrConversionComponents_ } + , suggestedYcbcrModel{ suggestedYcbcrModel_ } + , suggestedYcbcrRange{ suggestedYcbcrRange_ } + , suggestedXChromaOffset{ suggestedXChromaOffset_ } + , suggestedYChromaOffset{ suggestedYChromaOffset_ } { } @@ -4963,15 +4966,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , externalFormat( externalFormat_ ) - , formatFeatures( formatFeatures_ ) - , samplerYcbcrConversionComponents( samplerYcbcrConversionComponents_ ) - , suggestedYcbcrModel( suggestedYcbcrModel_ ) - , suggestedYcbcrRange( suggestedYcbcrRange_ ) - , suggestedXChromaOffset( suggestedXChromaOffset_ ) - , suggestedYChromaOffset( suggestedYChromaOffset_ ) + : pNext{ pNext_ } + , format{ format_ } + , externalFormat{ externalFormat_ } + , formatFeatures{ formatFeatures_ } + , samplerYcbcrConversionComponents{ samplerYcbcrConversionComponents_ } + , suggestedYcbcrModel{ suggestedYcbcrModel_ } + , suggestedYcbcrRange{ suggestedYcbcrRange_ } + , suggestedXChromaOffset{ suggestedXChromaOffset_ } + , suggestedYChromaOffset{ suggestedYChromaOffset_ } { } @@ -5084,8 +5087,8 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_CONSTEXPR AndroidHardwareBufferFormatResolvePropertiesANDROID( VULKAN_HPP_NAMESPACE::Format colorAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined,
                                                                               void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , colorAttachmentFormat( colorAttachmentFormat_ )
+      : pNext{ pNext_ }
+      , colorAttachmentFormat{ colorAttachmentFormat_ }
     {
     }
 
@@ -5172,9 +5175,9 @@
     VULKAN_HPP_CONSTEXPR AndroidHardwareBufferPropertiesANDROID( VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_ = {},
                                                                  uint32_t memoryTypeBits_ = {},
                                                                  void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , allocationSize( allocationSize_ )
-      , memoryTypeBits( memoryTypeBits_ )
+      : pNext{ pNext_ }
+      , allocationSize{ allocationSize_ }
+      , memoryTypeBits{ memoryTypeBits_ }
     {
     }
 
@@ -5258,8 +5261,8 @@
 # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR AndroidHardwareBufferUsageANDROID( uint64_t androidHardwareBufferUsage_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , androidHardwareBufferUsage( androidHardwareBufferUsage_ )
+      : pNext{ pNext_ }
+      , androidHardwareBufferUsage{ androidHardwareBufferUsage_ }
     {
     }
 
@@ -5344,9 +5347,9 @@
     VULKAN_HPP_CONSTEXPR AndroidSurfaceCreateInfoKHR( VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateFlagsKHR flags_ = {},
                                                       struct ANativeWindow * window_ = {},
                                                       const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , flags( flags_ )
-      , window( window_ )
+      : pNext{ pNext_ }
+      , flags{ flags_ }
+      , window{ window_ }
     {
     }
 
@@ -5443,6 +5446,230 @@
   };
 #endif /*VK_USE_PLATFORM_ANDROID_KHR*/
 
+  struct AntiLagPresentationInfoAMD
+  {
+    using NativeType = VkAntiLagPresentationInfoAMD;
+
+    static const bool                    allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAntiLagPresentationInfoAMD;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR AntiLagPresentationInfoAMD( VULKAN_HPP_NAMESPACE::AntiLagStageAMD stage_ = VULKAN_HPP_NAMESPACE::AntiLagStageAMD::eInput,
+                                                     uint64_t frameIndex_ = {},
+                                                     void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , stage{ stage_ }
+      , frameIndex{ frameIndex_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR AntiLagPresentationInfoAMD( AntiLagPresentationInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    AntiLagPresentationInfoAMD( VkAntiLagPresentationInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+      : AntiLagPresentationInfoAMD( *reinterpret_cast<AntiLagPresentationInfoAMD const *>( &rhs ) )
+    {
+    }
+
+    AntiLagPresentationInfoAMD & operator=( AntiLagPresentationInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    AntiLagPresentationInfoAMD & operator=( VkAntiLagPresentationInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 AntiLagPresentationInfoAMD & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 AntiLagPresentationInfoAMD & setStage( VULKAN_HPP_NAMESPACE::AntiLagStageAMD stage_ ) VULKAN_HPP_NOEXCEPT
+    {
+      stage = stage_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 AntiLagPresentationInfoAMD & setFrameIndex( uint64_t frameIndex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      frameIndex = frameIndex_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkAntiLagPresentationInfoAMD const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkAntiLagPresentationInfoAMD *>( this );
+    }
+
+    operator VkAntiLagPresentationInfoAMD &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkAntiLagPresentationInfoAMD *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::AntiLagStageAMD const &, uint64_t const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, stage, frameIndex );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( AntiLagPresentationInfoAMD const & ) const = default;
+#else
+    bool operator==( AntiLagPresentationInfoAMD const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( stage == rhs.stage ) && ( frameIndex == rhs.frameIndex );
+#  endif
+    }
+
+    bool operator!=( AntiLagPresentationInfoAMD const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType   sType      = StructureType::eAntiLagPresentationInfoAMD;
+    void *                                pNext      = {};
+    VULKAN_HPP_NAMESPACE::AntiLagStageAMD stage      = VULKAN_HPP_NAMESPACE::AntiLagStageAMD::eInput;
+    uint64_t                              frameIndex = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::eAntiLagPresentationInfoAMD>
+  {
+    using Type = AntiLagPresentationInfoAMD;
+  };
+
+  struct AntiLagDataAMD
+  {
+    using NativeType = VkAntiLagDataAMD;
+
+    static const bool                    allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAntiLagDataAMD;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR AntiLagDataAMD( VULKAN_HPP_NAMESPACE::AntiLagModeAMD mode_ = VULKAN_HPP_NAMESPACE::AntiLagModeAMD::eDriverControl,
+                                         uint32_t maxFPS_ = {},
+                                         const VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD * pPresentationInfo_ = {},
+                                         const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , mode{ mode_ }
+      , maxFPS{ maxFPS_ }
+      , pPresentationInfo{ pPresentationInfo_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR AntiLagDataAMD( AntiLagDataAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    AntiLagDataAMD( VkAntiLagDataAMD const & rhs ) VULKAN_HPP_NOEXCEPT : AntiLagDataAMD( *reinterpret_cast<AntiLagDataAMD const *>( &rhs ) ) {}
+
+    AntiLagDataAMD & operator=( AntiLagDataAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    AntiLagDataAMD & operator=( VkAntiLagDataAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AntiLagDataAMD const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 AntiLagDataAMD & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 AntiLagDataAMD & setMode( VULKAN_HPP_NAMESPACE::AntiLagModeAMD mode_ ) VULKAN_HPP_NOEXCEPT
+    {
+      mode = mode_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 AntiLagDataAMD & setMaxFPS( uint32_t maxFPS_ ) VULKAN_HPP_NOEXCEPT
+    {
+      maxFPS = maxFPS_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 AntiLagDataAMD &
+      setPPresentationInfo( const VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD * pPresentationInfo_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pPresentationInfo = pPresentationInfo_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkAntiLagDataAMD const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkAntiLagDataAMD *>( this );
+    }
+
+    operator VkAntiLagDataAMD &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkAntiLagDataAMD *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &,
+               const void * const &,
+               VULKAN_HPP_NAMESPACE::AntiLagModeAMD const &,
+               uint32_t const &,
+               const VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD * const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, mode, maxFPS, pPresentationInfo );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( AntiLagDataAMD const & ) const = default;
+#else
+    bool operator==( AntiLagDataAMD const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( mode == rhs.mode ) && ( maxFPS == rhs.maxFPS ) &&
+             ( pPresentationInfo == rhs.pPresentationInfo );
+#  endif
+    }
+
+    bool operator!=( AntiLagDataAMD const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType                      sType             = StructureType::eAntiLagDataAMD;
+    const void *                                             pNext             = {};
+    VULKAN_HPP_NAMESPACE::AntiLagModeAMD                     mode              = VULKAN_HPP_NAMESPACE::AntiLagModeAMD::eDriverControl;
+    uint32_t                                                 maxFPS            = {};
+    const VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD * pPresentationInfo = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::eAntiLagDataAMD>
+  {
+    using Type = AntiLagDataAMD;
+  };
+
   struct ApplicationInfo
   {
     using NativeType = VkApplicationInfo;
@@ -5457,12 +5684,12 @@
                                           uint32_t engineVersion_ = {},
                                           uint32_t apiVersion_ = {},
                                           const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , pApplicationName( pApplicationName_ )
-      , applicationVersion( applicationVersion_ )
-      , pEngineName( pEngineName_ )
-      , engineVersion( engineVersion_ )
-      , apiVersion( apiVersion_ )
+      : pNext{ pNext_ }
+      , pApplicationName{ pApplicationName_ }
+      , applicationVersion{ applicationVersion_ }
+      , pEngineName{ pEngineName_ }
+      , engineVersion{ engineVersion_ }
+      , apiVersion{ apiVersion_ }
     {
     }
 
@@ -5613,15 +5840,15 @@
                                              VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore,
                                              VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined,
                                              VULKAN_HPP_NAMESPACE::ImageLayout finalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined ) VULKAN_HPP_NOEXCEPT
-      : flags( flags_ )
-      , format( format_ )
-      , samples( samples_ )
-      , loadOp( loadOp_ )
-      , storeOp( storeOp_ )
-      , stencilLoadOp( stencilLoadOp_ )
-      , stencilStoreOp( stencilStoreOp_ )
-      , initialLayout( initialLayout_ )
-      , finalLayout( finalLayout_ )
+      : flags{ flags_ }
+      , format{ format_ }
+      , samples{ samples_ }
+      , loadOp{ loadOp_ }
+      , storeOp{ storeOp_ }
+      , stencilLoadOp{ stencilLoadOp_ }
+      , stencilStoreOp{ stencilStoreOp_ }
+      , initialLayout{ initialLayout_ }
+      , finalLayout{ finalLayout_ }
     {
     }
 
@@ -5777,16 +6004,16 @@
                                               VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined,
                                               VULKAN_HPP_NAMESPACE::ImageLayout finalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined,
                                               const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , flags( flags_ )
-      , format( format_ )
-      , samples( samples_ )
-      , loadOp( loadOp_ )
-      , storeOp( storeOp_ )
-      , stencilLoadOp( stencilLoadOp_ )
-      , stencilStoreOp( stencilStoreOp_ )
-      , initialLayout( initialLayout_ )
-      , finalLayout( finalLayout_ )
+      : pNext{ pNext_ }
+      , flags{ flags_ }
+      , format{ format_ }
+      , samples{ samples_ }
+      , loadOp{ loadOp_ }
+      , storeOp{ storeOp_ }
+      , stencilLoadOp{ stencilLoadOp_ }
+      , stencilStoreOp{ stencilStoreOp_ }
+      , initialLayout{ initialLayout_ }
+      , finalLayout{ finalLayout_ }
     {
     }
 
@@ -5954,9 +6181,9 @@
namespace VULKAN_HPP_NAMESPACE AttachmentDescriptionStencilLayout( VULKAN_HPP_NAMESPACE::ImageLayout stencilInitialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageLayout stencilFinalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stencilInitialLayout( stencilInitialLayout_ ) - , stencilFinalLayout( stencilFinalLayout_ ) + : pNext{ pNext_ } + , stencilInitialLayout{ stencilInitialLayout_ } + , stencilFinalLayout{ stencilFinalLayout_ } { } @@ -6062,8 +6289,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR AttachmentReference( uint32_t attachment_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout layout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined ) VULKAN_HPP_NOEXCEPT - : attachment( attachment_ ) - , layout( layout_ ) + : attachment{ attachment_ } + , layout{ layout_ } { } @@ -6153,10 +6380,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageLayout layout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachment( attachment_ ) - , layout( layout_ ) - , aspectMask( aspectMask_ ) + : pNext{ pNext_ } + , attachment{ attachment_ } + , layout{ layout_ } + , aspectMask{ aspectMask_ } { } @@ -6273,8 +6500,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR AttachmentReferenceStencilLayout( VULKAN_HPP_NAMESPACE::ImageLayout stencilLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stencilLayout( stencilLayout_ ) + : pNext{ pNext_ } + , stencilLayout{ stencilLayout_ } { } @@ -6375,10 +6602,10 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::SampleCountFlagBits * pColorAttachmentSamples_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits depthStencilAttachmentSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachmentSamples( pColorAttachmentSamples_ ) - , depthStencilAttachmentSamples( depthStencilAttachmentSamples_ ) + : pNext{ pNext_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentSamples{ pColorAttachmentSamples_ } + , depthStencilAttachmentSamples{ depthStencilAttachmentSamples_ } { } @@ -6516,8 +6743,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Extent2D( uint32_t width_ = {}, uint32_t height_ = {} ) VULKAN_HPP_NOEXCEPT - : width( width_ ) - , height( height_ ) + : width{ width_ } + , height{ height_ } { } @@ -6599,8 +6826,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SampleLocationEXT( float x_ = {}, float y_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) + : x{ x_ } + , y{ y_ } { } @@ -6690,11 +6917,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t sampleLocationsCount_ = {}, const VULKAN_HPP_NAMESPACE::SampleLocationEXT * pSampleLocations_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampleLocationsPerPixel( sampleLocationsPerPixel_ ) - , sampleLocationGridSize( sampleLocationGridSize_ ) - , sampleLocationsCount( sampleLocationsCount_ ) - , pSampleLocations( pSampleLocations_ ) + : pNext{ pNext_ } + , sampleLocationsPerPixel{ 
sampleLocationsPerPixel_ } + , sampleLocationGridSize{ sampleLocationGridSize_ } + , sampleLocationsCount{ sampleLocationsCount_ } + , pSampleLocations{ pSampleLocations_ } { } @@ -6842,8 +7069,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR AttachmentSampleLocationsEXT( uint32_t attachmentIndex_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {} ) VULKAN_HPP_NOEXCEPT - : attachmentIndex( attachmentIndex_ ) - , sampleLocationsInfo( sampleLocationsInfo_ ) + : attachmentIndex{ attachmentIndex_ } + , sampleLocationsInfo{ sampleLocationsInfo_ } { } @@ -6930,8 +7157,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) BaseInStructure( VULKAN_HPP_NAMESPACE::StructureType sType_ = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo, const struct VULKAN_HPP_NAMESPACE::BaseInStructure * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : sType( sType_ ) - , pNext( pNext_ ) + : sType{ sType_ } + , pNext{ pNext_ } { } @@ -7008,8 +7235,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) BaseOutStructure( VULKAN_HPP_NAMESPACE::StructureType sType_ = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo, struct VULKAN_HPP_NAMESPACE::BaseOutStructure * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : sType( sType_ ) - , pNext( pNext_ ) + : sType{ sType_ } + , pNext{ pNext_ } { } @@ -7093,12 +7320,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t deviceIndexCount_ = {}, const uint32_t * pDeviceIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructure( accelerationStructure_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) - , deviceIndexCount( deviceIndexCount_ ) - , pDeviceIndices( pDeviceIndices_ ) + : pNext{ pNext_ } + , accelerationStructure{ accelerationStructure_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } + , deviceIndexCount{ deviceIndexCount_ } + , pDeviceIndices{ pDeviceIndices_ } { } @@ -7257,9 +7484,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BindBufferMemoryDeviceGroupInfo( uint32_t deviceIndexCount_ = {}, const uint32_t * pDeviceIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceIndexCount( deviceIndexCount_ ) - , pDeviceIndices( pDeviceIndices_ ) + : pNext{ pNext_ } + , deviceIndexCount{ deviceIndexCount_ } + , pDeviceIndices{ pDeviceIndices_ } { } @@ -7383,10 +7610,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } { } @@ -7504,10 +7731,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, uint32_t set_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stageFlags( stageFlags_ ) - , layout( layout_ ) - , set( set_ ) + : pNext{ pNext_ } + , stageFlags{ stageFlags_ } + , layout{ layout_ } + , set{ set_ } { } @@ -7628,14 +7855,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t dynamicOffsetCount_ = {}, const uint32_t * pDynamicOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stageFlags( stageFlags_ ) - , layout( layout_ ) - , firstSet( firstSet_ ) - , descriptorSetCount( descriptorSetCount_ ) - , 
pDescriptorSets( pDescriptorSets_ ) - , dynamicOffsetCount( dynamicOffsetCount_ ) - , pDynamicOffsets( pDynamicOffsets_ ) + : pNext{ pNext_ } + , stageFlags{ stageFlags_ } + , layout{ layout_ } + , firstSet{ firstSet_ } + , descriptorSetCount{ descriptorSetCount_ } + , pDescriptorSets{ pDescriptorSets_ } + , dynamicOffsetCount{ dynamicOffsetCount_ } + , pDynamicOffsets{ pDynamicOffsets_ } { } @@ -7817,8 +8044,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Offset2D( int32_t x_ = {}, int32_t y_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) + : x{ x_ } + , y{ y_ } { } @@ -7900,8 +8127,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Rect2D( VULKAN_HPP_NAMESPACE::Offset2D offset_ = {}, VULKAN_HPP_NAMESPACE::Extent2D extent_ = {} ) VULKAN_HPP_NOEXCEPT - : offset( offset_ ) - , extent( extent_ ) + : offset{ offset_ } + , extent{ extent_ } { } @@ -7990,11 +8217,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t splitInstanceBindRegionCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pSplitInstanceBindRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceIndexCount( deviceIndexCount_ ) - , pDeviceIndices( pDeviceIndices_ ) - , splitInstanceBindRegionCount( splitInstanceBindRegionCount_ ) - , pSplitInstanceBindRegions( pSplitInstanceBindRegions_ ) + : pNext{ pNext_ } + , deviceIndexCount{ deviceIndexCount_ } + , pDeviceIndices{ pDeviceIndices_ } + , splitInstanceBindRegionCount{ splitInstanceBindRegionCount_ } + , pSplitInstanceBindRegions{ pSplitInstanceBindRegions_ } { } @@ -8155,10 +8382,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) + : pNext{ pNext_ } + , image{ image_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } { } @@ -8274,9 +8501,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BindImageMemorySwapchainInfoKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ = {}, uint32_t imageIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchain( swapchain_ ) - , imageIndex( imageIndex_ ) + : pNext{ pNext_ } + , swapchain{ swapchain_ } + , imageIndex{ imageIndex_ } { } @@ -8379,8 +8606,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BindImagePlaneMemoryInfo( VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , planeAspect( planeAspect_ ) + : pNext{ pNext_ } + , planeAspect{ planeAspect_ } { } @@ -8477,9 +8704,9 @@ namespace VULKAN_HPP_NAMESPACE BindIndexBufferIndirectCommandNV( VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ = {}, uint32_t size_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16 ) VULKAN_HPP_NOEXCEPT - : bufferAddress( bufferAddress_ ) - , size( size_ ) - , indexType( indexType_ ) + : bufferAddress{ bufferAddress_ } + , size{ size_ } + , indexType{ indexType_ } { } @@ -8574,8 +8801,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BindMemoryStatusKHR( VULKAN_HPP_NAMESPACE::Result * pResult_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT 
- : pNext( pNext_ ) - , pResult( pResult_ ) + : pNext{ pNext_ } + , pResult{ pResult_ } { } @@ -8666,7 +8893,7 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BindPipelineIndirectCommandNV( VULKAN_HPP_NAMESPACE::DeviceAddress pipelineAddress_ = {} ) VULKAN_HPP_NOEXCEPT - : pipelineAddress( pipelineAddress_ ) + : pipelineAddress{ pipelineAddress_ } { } @@ -8743,7 +8970,7 @@ namespace VULKAN_HPP_NAMESPACE using NativeType = VkBindShaderGroupIndirectCommandNV; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR BindShaderGroupIndirectCommandNV( uint32_t groupIndex_ = {} ) VULKAN_HPP_NOEXCEPT : groupIndex( groupIndex_ ) {} + VULKAN_HPP_CONSTEXPR BindShaderGroupIndirectCommandNV( uint32_t groupIndex_ = {} ) VULKAN_HPP_NOEXCEPT : groupIndex{ groupIndex_ } {} VULKAN_HPP_CONSTEXPR BindShaderGroupIndirectCommandNV( BindShaderGroupIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -8823,11 +9050,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ = {} ) VULKAN_HPP_NOEXCEPT - : resourceOffset( resourceOffset_ ) - , size( size_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) - , flags( flags_ ) + : resourceOffset{ resourceOffset_ } + , size{ size_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } + , flags{ flags_ } { } @@ -8937,9 +9164,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SparseBufferMemoryBindInfo( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseMemoryBind * pBinds_ = {} ) VULKAN_HPP_NOEXCEPT - : buffer( buffer_ ) - , bindCount( bindCount_ ) - , pBinds( pBinds_ ) + : buffer{ buffer_ } + , bindCount{ bindCount_ } + , pBinds{ pBinds_ } { } @@ -9051,9 +9278,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SparseImageOpaqueMemoryBindInfo( VULKAN_HPP_NAMESPACE::Image image_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseMemoryBind * pBinds_ = {} ) VULKAN_HPP_NOEXCEPT - : image( image_ ) - , bindCount( bindCount_ ) - , pBinds( pBinds_ ) + : image{ image_ } + , bindCount{ bindCount_ } + , pBinds{ pBinds_ } { } @@ -9164,9 +9391,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageSubresource( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t mipLevel_ = {}, uint32_t arrayLayer_ = {} ) VULKAN_HPP_NOEXCEPT - : aspectMask( aspectMask_ ) - , mipLevel( mipLevel_ ) - , arrayLayer( arrayLayer_ ) + : aspectMask{ aspectMask_ } + , mipLevel{ mipLevel_ } + , arrayLayer{ arrayLayer_ } { } @@ -9255,9 +9482,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Offset3D( int32_t x_ = {}, int32_t y_ = {}, int32_t z_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) - , z( z_ ) + : x{ x_ } + , y{ y_ } + , z{ z_ } { } @@ -9348,9 +9575,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Extent3D( uint32_t width_ = {}, uint32_t height_ = {}, uint32_t depth_ = {} ) VULKAN_HPP_NOEXCEPT - : width( width_ ) - , height( height_ ) - , depth( depth_ ) + : width{ width_ } + , height{ height_ } + , depth{ depth_ } { } @@ -9446,12 +9673,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, 
VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ = {} ) VULKAN_HPP_NOEXCEPT - : subresource( subresource_ ) - , offset( offset_ ) - , extent( extent_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) - , flags( flags_ ) + : subresource{ subresource_ } + , offset{ offset_ } + , extent{ extent_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } + , flags{ flags_ } { } @@ -9572,9 +9799,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SparseImageMemoryBindInfo( VULKAN_HPP_NAMESPACE::Image image_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseImageMemoryBind * pBinds_ = {} ) VULKAN_HPP_NOEXCEPT - : image( image_ ) - , bindCount( bindCount_ ) - , pBinds( pBinds_ ) + : image{ image_ } + , bindCount{ bindCount_ } + , pBinds{ pBinds_ } { } @@ -9697,17 +9924,17 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore * pSignalSemaphores_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreCount( waitSemaphoreCount_ ) - , pWaitSemaphores( pWaitSemaphores_ ) - , bufferBindCount( bufferBindCount_ ) - , pBufferBinds( pBufferBinds_ ) - , imageOpaqueBindCount( imageOpaqueBindCount_ ) - , pImageOpaqueBinds( pImageOpaqueBinds_ ) - , imageBindCount( imageBindCount_ ) - , pImageBinds( pImageBinds_ ) - , signalSemaphoreCount( signalSemaphoreCount_ ) - , pSignalSemaphores( pSignalSemaphores_ ) + : pNext{ pNext_ } + , waitSemaphoreCount{ waitSemaphoreCount_ } + , pWaitSemaphores{ pWaitSemaphores_ } + , bufferBindCount{ bufferBindCount_ } + , pBufferBinds{ pBufferBinds_ } + , imageOpaqueBindCount{ imageOpaqueBindCount_ } + , pImageOpaqueBinds{ pImageOpaqueBinds_ } + , imageBindCount{ imageBindCount_ } + , pImageBinds{ pImageBinds_ } + , signalSemaphoreCount{ signalSemaphoreCount_ } + , pSignalSemaphores{ pSignalSemaphores_ } { } @@ -9960,9 +10187,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BindVertexBufferIndirectCommandNV( VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ = {}, uint32_t size_ = {}, uint32_t stride_ = {} ) VULKAN_HPP_NOEXCEPT - : bufferAddress( bufferAddress_ ) - , size( size_ ) - , stride( stride_ ) + : bufferAddress{ bufferAddress_ } + , size{ size_ } + , stride{ stride_ } { } @@ -10061,11 +10288,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memorySize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryBindIndex( memoryBindIndex_ ) - , memory( memory_ ) - , memoryOffset( memoryOffset_ ) - , memorySize( memorySize_ ) + : pNext{ pNext_ } + , memoryBindIndex{ memoryBindIndex_ } + , memory{ memory_ } + , memoryOffset{ memoryOffset_ } + , memorySize{ memorySize_ } { } @@ -10189,8 +10416,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BlitImageCubicWeightsInfoQCOM( VULKAN_HPP_NAMESPACE::CubicFilterWeightsQCOM cubicWeights_ = VULKAN_HPP_NAMESPACE::CubicFilterWeightsQCOM::eCatmullRom, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cubicWeights( cubicWeights_ ) + : pNext{ pNext_ } + , cubicWeights{ cubicWeights_ } { } @@ -10285,10 +10512,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t mipLevel_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {} ) VULKAN_HPP_NOEXCEPT - : aspectMask( aspectMask_ ) - , mipLevel( mipLevel_ ) - , baseArrayLayer( baseArrayLayer_ ) - , layerCount( layerCount_ ) + : aspectMask{ aspectMask_ } + , mipLevel{ mipLevel_ } + , baseArrayLayer{ baseArrayLayer_ } + , 
layerCount{ layerCount_ } { } @@ -10394,11 +10621,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, std::array<VULKAN_HPP_NAMESPACE::Offset3D, 2> const & dstOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcSubresource( srcSubresource_ ) - , srcOffsets( srcOffsets_ ) - , dstSubresource( dstSubresource_ ) - , dstOffsets( dstOffsets_ ) + : pNext{ pNext_ } + , srcSubresource{ srcSubresource_ } + , srcOffsets{ srcOffsets_ } + , dstSubresource{ dstSubresource_ } + , dstOffsets{ dstOffsets_ } { } @@ -10526,14 +10753,14 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::ImageBlit2 * pRegions_ = {}, VULKAN_HPP_NAMESPACE::Filter filter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) - , filter( filter_ ) + : pNext{ pNext_ } + , srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } + , filter{ filter_ } { } @@ -10708,8 +10935,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferCaptureDescriptorDataInfoEXT( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -10807,9 +11034,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BufferCollectionBufferCreateInfoFUCHSIA( VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA collection_ = {}, uint32_t index_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , collection( collection_ ) - , index( index_ ) + : pNext{ pNext_ } + , collection{ collection_ } + , index{ index_ } { } @@ -10919,12 +11146,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t minBufferCountForDedicatedSlack_ = {}, uint32_t minBufferCountForSharedSlack_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minBufferCount( minBufferCount_ ) - , maxBufferCount( maxBufferCount_ ) - , minBufferCountForCamping( minBufferCountForCamping_ ) - , minBufferCountForDedicatedSlack( minBufferCountForDedicatedSlack_ ) - , minBufferCountForSharedSlack( minBufferCountForSharedSlack_ ) + : pNext{ pNext_ } + , minBufferCount{ minBufferCount_ } + , maxBufferCount{ maxBufferCount_ } + , minBufferCountForCamping{ minBufferCountForCamping_ } + , minBufferCountForDedicatedSlack{ minBufferCountForDedicatedSlack_ } + , minBufferCountForSharedSlack{ minBufferCountForSharedSlack_ } { } @@ -11059,8 +11286,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferCollectionCreateInfoFUCHSIA( zx_handle_t collectionToken_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , collectionToken( collectionToken_ ) + : pNext{ pNext_ } + , collectionToken{ collectionToken_ } { } @@ -11165,9 +11392,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BufferCollectionImageCreateInfoFUCHSIA( VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA collection_ = {}, uint32_t index_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , collection( collection_ ) - , index( index_ ) + : pNext{ pNext_ } + , collection{ collection_ } + ,
index{ index_ } { } @@ -11272,8 +11499,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SysmemColorSpaceFUCHSIA( uint32_t colorSpace_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , colorSpace( colorSpace_ ) + : pNext{ pNext_ } + , colorSpace{ colorSpace_ } { } @@ -11382,18 +11609,18 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryTypeBits( memoryTypeBits_ ) - , bufferCount( bufferCount_ ) - , createInfoIndex( createInfoIndex_ ) - , sysmemPixelFormat( sysmemPixelFormat_ ) - , formatFeatures( formatFeatures_ ) - , sysmemColorSpaceIndex( sysmemColorSpaceIndex_ ) - , samplerYcbcrConversionComponents( samplerYcbcrConversionComponents_ ) - , suggestedYcbcrModel( suggestedYcbcrModel_ ) - , suggestedYcbcrRange( suggestedYcbcrRange_ ) - , suggestedXChromaOffset( suggestedXChromaOffset_ ) - , suggestedYChromaOffset( suggestedYChromaOffset_ ) + : pNext{ pNext_ } + , memoryTypeBits{ memoryTypeBits_ } + , bufferCount{ bufferCount_ } + , createInfoIndex{ createInfoIndex_ } + , sysmemPixelFormat{ sysmemPixelFormat_ } + , formatFeatures{ formatFeatures_ } + , sysmemColorSpaceIndex{ sysmemColorSpaceIndex_ } + , samplerYcbcrConversionComponents{ samplerYcbcrConversionComponents_ } + , suggestedYcbcrModel{ suggestedYcbcrModel_ } + , suggestedYcbcrRange{ suggestedYcbcrRange_ } + , suggestedXChromaOffset{ suggestedXChromaOffset_ } + , suggestedYChromaOffset{ suggestedYChromaOffset_ } { } @@ -11519,13 +11746,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queueFamilyIndexCount_ = {}, const uint32_t * pQueueFamilyIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , size( size_ ) - , usage( usage_ ) - , sharingMode( sharingMode_ ) - , queueFamilyIndexCount( queueFamilyIndexCount_ ) - , pQueueFamilyIndices( pQueueFamilyIndices_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , size{ size_ } + , usage{ usage_ } + , sharingMode{ sharingMode_ } + , queueFamilyIndexCount{ queueFamilyIndexCount_ } + , pQueueFamilyIndices{ pQueueFamilyIndices_ } { } @@ -11691,10 +11918,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::FormatFeatureFlags requiredFormatFeatures_ = {}, VULKAN_HPP_NAMESPACE::BufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , createInfo( createInfo_ ) - , requiredFormatFeatures( requiredFormatFeatures_ ) - , bufferCollectionConstraints( bufferCollectionConstraints_ ) + : pNext{ pNext_ } + , createInfo{ createInfo_ } + , requiredFormatFeatures{ requiredFormatFeatures_ } + , bufferCollectionConstraints{ bufferCollectionConstraints_ } { } @@ -11810,9 +12037,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR BufferCopy( VULKAN_HPP_NAMESPACE::DeviceSize srcOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} ) VULKAN_HPP_NOEXCEPT - : srcOffset( srcOffset_ ) - , dstOffset( dstOffset_ ) - , size( size_ ) + : srcOffset{ srcOffset_ } + , dstOffset{ dstOffset_ } + , size{ size_ } { } @@ -11907,10 +12134,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize 
size_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcOffset( srcOffset_ ) - , dstOffset( dstOffset_ ) - , size( size_ ) + : pNext{ pNext_ } + , srcOffset{ srcOffset_ } + , dstOffset{ dstOffset_ } + , size{ size_ } { } @@ -12023,8 +12250,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferDeviceAddressCreateInfoEXT( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceAddress( deviceAddress_ ) + : pNext{ pNext_ } + , deviceAddress{ deviceAddress_ } { } @@ -12119,8 +12346,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferDeviceAddressInfo( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -12220,12 +12447,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {} ) VULKAN_HPP_NOEXCEPT - : bufferOffset( bufferOffset_ ) - , bufferRowLength( bufferRowLength_ ) - , bufferImageHeight( bufferImageHeight_ ) - , imageSubresource( imageSubresource_ ) - , imageOffset( imageOffset_ ) - , imageExtent( imageExtent_ ) + : bufferOffset{ bufferOffset_ } + , bufferRowLength{ bufferRowLength_ } + , bufferImageHeight{ bufferImageHeight_ } + , imageSubresource{ imageSubresource_ } + , imageOffset{ imageOffset_ } + , imageExtent{ imageExtent_ } { } @@ -12350,13 +12577,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , bufferOffset( bufferOffset_ ) - , bufferRowLength( bufferRowLength_ ) - , bufferImageHeight( bufferImageHeight_ ) - , imageSubresource( imageSubresource_ ) - , imageOffset( imageOffset_ ) - , imageExtent( imageExtent_ ) + : pNext{ pNext_ } + , bufferOffset{ bufferOffset_ } + , bufferRowLength{ bufferRowLength_ } + , bufferImageHeight{ bufferImageHeight_ } + , imageSubresource{ imageSubresource_ } + , imageOffset{ imageOffset_ } + , imageExtent{ imageExtent_ } { } @@ -12501,14 +12728,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcAccessMask( srcAccessMask_ ) - , dstAccessMask( dstAccessMask_ ) - , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) - , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) - , buffer( buffer_ ) - , offset( offset_ ) - , size( size_ ) + : pNext{ pNext_ } + , srcAccessMask{ srcAccessMask_ } + , dstAccessMask{ dstAccessMask_ } + , srcQueueFamilyIndex{ srcQueueFamilyIndex_ } + , dstQueueFamilyIndex{ dstQueueFamilyIndex_ } + , buffer{ buffer_ } + , offset{ offset_ } + , size{ size_ } { } @@ -12663,16 +12890,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcStageMask( srcStageMask_ ) - , srcAccessMask( srcAccessMask_ ) - , dstStageMask( dstStageMask_ ) - , dstAccessMask( dstAccessMask_ ) - , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) - , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) - , buffer( 
buffer_ ) - , offset( offset_ ) - , size( size_ ) + : pNext{ pNext_ } + , srcStageMask{ srcStageMask_ } + , srcAccessMask{ srcAccessMask_ } + , dstStageMask{ dstStageMask_ } + , dstAccessMask{ dstAccessMask_ } + , srcQueueFamilyIndex{ srcQueueFamilyIndex_ } + , dstQueueFamilyIndex{ dstQueueFamilyIndex_ } + , buffer{ buffer_ } + , offset{ offset_ } + , size{ size_ } { } @@ -12837,8 +13064,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferMemoryRequirementsInfo2( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -12935,8 +13162,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferOpaqueCaptureAddressCreateInfo( uint64_t opaqueCaptureAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , opaqueCaptureAddress( opaqueCaptureAddress_ ) + : pNext{ pNext_ } + , opaqueCaptureAddress{ opaqueCaptureAddress_ } { } @@ -13034,8 +13261,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR BufferUsageFlags2CreateInfoKHR( VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR usage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , usage( usage_ ) + : pNext{ pNext_ } + , usage{ usage_ } { } @@ -13135,12 +13362,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize range_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , buffer( buffer_ ) - , format( format_ ) - , offset( offset_ ) - , range( range_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , buffer{ buffer_ } + , format{ format_ } + , offset{ offset_ } + , range{ range_ } { } @@ -13271,8 +13498,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CalibratedTimestampInfoKHR( VULKAN_HPP_NAMESPACE::TimeDomainKHR timeDomain_ = VULKAN_HPP_NAMESPACE::TimeDomainKHR::eDevice, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , timeDomain( timeDomain_ ) + : pNext{ pNext_ } + , timeDomain{ timeDomain_ } { } @@ -13370,9 +13597,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CheckpointData2NV( VULKAN_HPP_NAMESPACE::PipelineStageFlags2 stage_ = {}, void * pCheckpointMarker_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stage( stage_ ) - , pCheckpointMarker( pCheckpointMarker_ ) + : pNext{ pNext_ } + , stage{ stage_ } + , pCheckpointMarker{ pCheckpointMarker_ } { } @@ -13453,9 +13680,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CheckpointDataNV( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits stage_ = VULKAN_HPP_NAMESPACE::PipelineStageFlagBits::eTopOfPipe, void * pCheckpointMarker_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stage( stage_ ) - , pCheckpointMarker( pCheckpointMarker_ ) + : pNext{ pNext_ } + , stage{ stage_ } + , pCheckpointMarker{ pCheckpointMarker_ } { } @@ -13593,8 +13820,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ClearDepthStencilValue( float depth_ = {}, uint32_t stencil_ = {} ) VULKAN_HPP_NOEXCEPT - : depth( depth_ ) - , stencil( stencil_ ) + : depth{ depth_ } + , stencil{ stencil_ } { } @@ -13724,9 +13951,9 
@@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 ClearAttachment( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t colorAttachment_ = {}, VULKAN_HPP_NAMESPACE::ClearValue clearValue_ = {} ) VULKAN_HPP_NOEXCEPT - : aspectMask( aspectMask_ ) - , colorAttachment( colorAttachment_ ) - , clearValue( clearValue_ ) + : aspectMask{ aspectMask_ } + , colorAttachment{ colorAttachment_ } + , clearValue{ clearValue_ } { } @@ -13797,9 +14024,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ClearRect( VULKAN_HPP_NAMESPACE::Rect2D rect_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {} ) VULKAN_HPP_NOEXCEPT - : rect( rect_ ) - , baseArrayLayer( baseArrayLayer_ ) - , layerCount( layerCount_ ) + : rect{ rect_ } + , baseArrayLayer{ baseArrayLayer_ } + , layerCount{ layerCount_ } { } @@ -13888,9 +14115,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CoarseSampleLocationNV( uint32_t pixelX_ = {}, uint32_t pixelY_ = {}, uint32_t sample_ = {} ) VULKAN_HPP_NOEXCEPT - : pixelX( pixelX_ ) - , pixelY( pixelY_ ) - , sample( sample_ ) + : pixelX{ pixelX_ } + , pixelY{ pixelY_ } + , sample{ sample_ } { } @@ -13986,10 +14213,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t sampleCount_ = {}, uint32_t sampleLocationCount_ = {}, const VULKAN_HPP_NAMESPACE::CoarseSampleLocationNV * pSampleLocations_ = {} ) VULKAN_HPP_NOEXCEPT - : shadingRate( shadingRate_ ) - , sampleCount( sampleCount_ ) - , sampleLocationCount( sampleLocationCount_ ) - , pSampleLocations( pSampleLocations_ ) + : shadingRate{ shadingRate_ } + , sampleCount{ sampleCount_ } + , sampleLocationCount{ sampleLocationCount_ } + , pSampleLocations{ pSampleLocations_ } { } @@ -14119,11 +14346,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 dstPremultiplied_ = {}, VULKAN_HPP_NAMESPACE::BlendOverlapEXT blendOverlap_ = VULKAN_HPP_NAMESPACE::BlendOverlapEXT::eUncorrelated, VULKAN_HPP_NAMESPACE::Bool32 clampResults_ = {} ) VULKAN_HPP_NOEXCEPT - : advancedBlendOp( advancedBlendOp_ ) - , srcPremultiplied( srcPremultiplied_ ) - , dstPremultiplied( dstPremultiplied_ ) - , blendOverlap( blendOverlap_ ) - , clampResults( clampResults_ ) + : advancedBlendOp{ advancedBlendOp_ } + , srcPremultiplied{ srcPremultiplied_ } + , dstPremultiplied{ dstPremultiplied_ } + , blendOverlap{ blendOverlap_ } + , clampResults{ clampResults_ } { } @@ -14239,12 +14466,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::BlendFactor srcAlphaBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendFactor dstAlphaBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendOp alphaBlendOp_ = VULKAN_HPP_NAMESPACE::BlendOp::eAdd ) VULKAN_HPP_NOEXCEPT - : srcColorBlendFactor( srcColorBlendFactor_ ) - , dstColorBlendFactor( dstColorBlendFactor_ ) - , colorBlendOp( colorBlendOp_ ) - , srcAlphaBlendFactor( srcAlphaBlendFactor_ ) - , dstAlphaBlendFactor( dstAlphaBlendFactor_ ) - , alphaBlendOp( alphaBlendOp_ ) + : srcColorBlendFactor{ srcColorBlendFactor_ } + , dstColorBlendFactor{ dstColorBlendFactor_ } + , colorBlendOp{ colorBlendOp_ } + , srcAlphaBlendFactor{ srcAlphaBlendFactor_ } + , dstAlphaBlendFactor{ dstAlphaBlendFactor_ } + , alphaBlendOp{ alphaBlendOp_ } { } @@ -14369,10 +14596,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::CommandBufferLevel level_ = VULKAN_HPP_NAMESPACE::CommandBufferLevel::ePrimary, uint32_t commandBufferCount_ = {}, const void * pNext_ = 
nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , commandPool( commandPool_ ) - , level( level_ ) - , commandBufferCount( commandBufferCount_ ) + : pNext{ pNext_ } + , commandPool{ commandPool_ } + , level{ level_ } + , commandBufferCount{ commandBufferCount_ } { } @@ -14492,13 +14719,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::QueryControlFlags queryFlags_ = {}, VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , renderPass( renderPass_ ) - , subpass( subpass_ ) - , framebuffer( framebuffer_ ) - , occlusionQueryEnable( occlusionQueryEnable_ ) - , queryFlags( queryFlags_ ) - , pipelineStatistics( pipelineStatistics_ ) + : pNext{ pNext_ } + , renderPass{ renderPass_ } + , subpass{ subpass_ } + , framebuffer{ framebuffer_ } + , occlusionQueryEnable{ occlusionQueryEnable_ } + , queryFlags{ queryFlags_ } + , pipelineStatistics{ pipelineStatistics_ } { } @@ -14640,9 +14867,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CommandBufferBeginInfo( VULKAN_HPP_NAMESPACE::CommandBufferUsageFlags flags_ = {}, const VULKAN_HPP_NAMESPACE::CommandBufferInheritanceInfo * pInheritanceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pInheritanceInfo( pInheritanceInfo_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pInheritanceInfo{ pInheritanceInfo_ } { } @@ -14749,8 +14976,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CommandBufferInheritanceConditionalRenderingInfoEXT( VULKAN_HPP_NAMESPACE::Bool32 conditionalRenderingEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , conditionalRenderingEnable( conditionalRenderingEnable_ ) + : pNext{ pNext_ } + , conditionalRenderingEnable{ conditionalRenderingEnable_ } { } @@ -14851,9 +15078,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::Rect2D renderArea_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , transform( transform_ ) - , renderArea( renderArea_ ) + : pNext{ pNext_ } + , transform{ transform_ } + , renderArea{ renderArea_ } { } @@ -14970,14 +15197,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , viewMask( viewMask_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachmentFormats( pColorAttachmentFormats_ ) - , depthAttachmentFormat( depthAttachmentFormat_ ) - , stencilAttachmentFormat( stencilAttachmentFormat_ ) - , rasterizationSamples( rasterizationSamples_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , viewMask{ viewMask_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentFormats{ pColorAttachmentFormats_ } + , depthAttachmentFormat{ depthAttachmentFormat_ } + , stencilAttachmentFormat{ stencilAttachmentFormat_ } + , rasterizationSamples{ rasterizationSamples_ } { } @@ -15160,12 +15387,12 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR Viewport( float x_ = {}, float y_ = {}, float width_ = {}, float height_ = {}, float minDepth_ = {}, 
float maxDepth_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) - , width( width_ ) - , height( height_ ) - , minDepth( minDepth_ ) - , maxDepth( maxDepth_ ) + : x{ x_ } + , y{ y_ } + , width{ width_ } + , height{ height_ } + , minDepth{ minDepth_ } + , maxDepth{ maxDepth_ } { } @@ -15282,10 +15509,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t viewportDepthCount_ = {}, const VULKAN_HPP_NAMESPACE::Viewport * pViewportDepths_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , viewportScissor2D( viewportScissor2D_ ) - , viewportDepthCount( viewportDepthCount_ ) - , pViewportDepths( pViewportDepths_ ) + : pNext{ pNext_ } + , viewportScissor2D{ viewportScissor2D_ } + , viewportDepthCount{ viewportDepthCount_ } + , pViewportDepths{ pViewportDepths_ } { } @@ -15404,9 +15631,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CommandBufferSubmitInfo( VULKAN_HPP_NAMESPACE::CommandBuffer commandBuffer_ = {}, uint32_t deviceMask_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , commandBuffer( commandBuffer_ ) - , deviceMask( deviceMask_ ) + : pNext{ pNext_ } + , commandBuffer{ commandBuffer_ } + , deviceMask{ deviceMask_ } { } @@ -15512,9 +15739,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CommandPoolCreateInfo( VULKAN_HPP_NAMESPACE::CommandPoolCreateFlags flags_ = {}, uint32_t queueFamilyIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , queueFamilyIndex( queueFamilyIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , queueFamilyIndex{ queueFamilyIndex_ } { } @@ -15613,9 +15840,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SpecializationMapEntry( uint32_t constantID_ = {}, uint32_t offset_ = {}, size_t size_ = {} ) VULKAN_HPP_NOEXCEPT - : constantID( constantID_ ) - , offset( offset_ ) - , size( size_ ) + : constantID{ constantID_ } + , offset{ offset_ } + , size{ size_ } { } @@ -15710,10 +15937,10 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::SpecializationMapEntry * pMapEntries_ = {}, size_t dataSize_ = {}, const void * pData_ = {} ) VULKAN_HPP_NOEXCEPT - : mapEntryCount( mapEntryCount_ ) - , pMapEntries( pMapEntries_ ) - , dataSize( dataSize_ ) - , pData( pData_ ) + : mapEntryCount{ mapEntryCount_ } + , pMapEntries{ pMapEntries_ } + , dataSize{ dataSize_ } + , pData{ pData_ } { } @@ -15849,12 +16076,12 @@ namespace VULKAN_HPP_NAMESPACE const char * pName_ = {}, const VULKAN_HPP_NAMESPACE::SpecializationInfo * pSpecializationInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stage( stage_ ) - , module( module_ ) - , pName( pName_ ) - , pSpecializationInfo( pSpecializationInfo_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stage{ stage_ } + , module{ module_ } + , pName{ pName_ } + , pSpecializationInfo{ pSpecializationInfo_ } { } @@ -16005,12 +16232,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stage( stage_ ) - , layout( layout_ ) - , basePipelineHandle( basePipelineHandle_ ) - , basePipelineIndex( basePipelineIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stage{ stage_ } + , layout{ layout_ } + , basePipelineHandle{ basePipelineHandle_ } + , basePipelineIndex{ basePipelineIndex_ } { } @@ -16143,10 +16370,10 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceAddress pipelineDeviceAddressCaptureReplay_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceAddress( deviceAddress_ ) - , size( size_ ) - , pipelineDeviceAddressCaptureReplay( pipelineDeviceAddressCaptureReplay_ ) + : pNext{ pNext_ } + , deviceAddress{ deviceAddress_ } + , size{ size_ } + , pipelineDeviceAddressCaptureReplay{ pipelineDeviceAddressCaptureReplay_ } { } @@ -16264,10 +16491,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagsEXT flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) - , offset( offset_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } + , offset{ offset_ } + , flags{ flags_ } { } @@ -16377,10 +16604,10 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ConformanceVersion( uint8_t major_ = {}, uint8_t minor_ = {}, uint8_t subminor_ = {}, uint8_t patch_ = {} ) VULKAN_HPP_NOEXCEPT - : major( major_ ) - , minor( minor_ ) - , subminor( subminor_ ) - , patch( patch_ ) + : major{ major_ } + , minor{ minor_ } + , subminor{ subminor_ } + , patch{ patch_ } { } @@ -16490,16 +16717,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 saturatingAccumulation_ = {}, VULKAN_HPP_NAMESPACE::ScopeKHR scope_ = VULKAN_HPP_NAMESPACE::ScopeKHR::eDevice, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , MSize( MSize_ ) - , NSize( NSize_ ) - , KSize( KSize_ ) - , AType( AType_ ) - , BType( BType_ ) - , CType( CType_ ) - , ResultType( ResultType_ ) - , saturatingAccumulation( saturatingAccumulation_ ) - , scope( scope_ ) + : pNext{ pNext_ } + , MSize{ MSize_ } + , NSize{ NSize_ } + , KSize{ KSize_ } + , AType{ AType_ } + , BType{ BType_ } + , CType{ CType_ } + , ResultType{ ResultType_ } + , saturatingAccumulation{ saturatingAccumulation_ } + , scope{ scope_ } { } @@ -16608,15 +16835,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ComponentTypeNV DType_ = {}, VULKAN_HPP_NAMESPACE::ScopeNV scope_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , MSize( MSize_ ) - , NSize( NSize_ ) - , KSize( KSize_ ) - , AType( AType_ ) - , BType( BType_ ) - , CType( CType_ ) - , DType( DType_ ) - , scope( scope_ ) + : pNext{ pNext_ } + , MSize{ MSize_ } + , NSize{ NSize_ } + , KSize{ KSize_ } + , AType{ AType_ } + , BType{ BType_ } + , CType{ CType_ } + , DType{ DType_ } + , scope{ scope_ } { } @@ -16718,10 +16945,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -16838,10 +17065,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -16941,11 +17168,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t 
regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferCopy2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcBuffer( srcBuffer_ ) - , dstBuffer( dstBuffer_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , srcBuffer{ srcBuffer_ } + , dstBuffer{ dstBuffer_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -17090,12 +17317,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferImageCopy2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcBuffer( srcBuffer_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , srcBuffer{ srcBuffer_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -17255,8 +17482,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CopyCommandTransformInfoQCOM( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , transform( transform_ ) + : pNext{ pNext_ } + , transform{ transform_ } { } @@ -17358,14 +17585,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t dstArrayElement_ = {}, uint32_t descriptorCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcSet( srcSet_ ) - , srcBinding( srcBinding_ ) - , srcArrayElement( srcArrayElement_ ) - , dstSet( dstSet_ ) - , dstBinding( dstBinding_ ) - , dstArrayElement( dstArrayElement_ ) - , descriptorCount( descriptorCount_ ) + : pNext{ pNext_ } + , srcSet{ srcSet_ } + , srcBinding{ srcBinding_ } + , srcArrayElement{ srcArrayElement_ } + , dstSet{ dstSet_ } + , dstBinding{ dstBinding_ } + , dstArrayElement{ dstArrayElement_ } + , descriptorCount{ descriptorCount_ } { } @@ -17514,12 +17741,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcSubresource( srcSubresource_ ) - , srcOffset( srcOffset_ ) - , dstSubresource( dstSubresource_ ) - , dstOffset( dstOffset_ ) - , extent( extent_ ) + : pNext{ pNext_ } + , srcSubresource{ srcSubresource_ } + , srcOffset{ srcOffset_ } + , dstSubresource{ dstSubresource_ } + , dstOffset{ dstOffset_ } + , extent{ extent_ } { } @@ -17654,13 +17881,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -17829,12 +18056,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferImageCopy2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , dstBuffer( dstBuffer_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , 
srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , dstBuffer{ dstBuffer_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -17999,14 +18226,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -18189,13 +18416,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pHostPointer( pHostPointer_ ) - , memoryRowLength( memoryRowLength_ ) - , memoryImageHeight( memoryImageHeight_ ) - , imageSubresource( imageSubresource_ ) - , imageOffset( imageOffset_ ) - , imageExtent( imageExtent_ ) + : pNext{ pNext_ } + , pHostPointer{ pHostPointer_ } + , memoryRowLength{ memoryRowLength_ } + , memoryImageHeight{ memoryImageHeight_ } + , imageSubresource{ imageSubresource_ } + , imageOffset{ imageOffset_ } + , imageExtent{ imageExtent_ } { } @@ -18340,12 +18567,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -18500,9 +18727,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR CopyMemoryIndirectCommandNV( VULKAN_HPP_NAMESPACE::DeviceAddress srcAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceAddress dstAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} ) VULKAN_HPP_NOEXCEPT - : srcAddress( srcAddress_ ) - , dstAddress( dstAddress_ ) - , size( size_ ) + : srcAddress{ srcAddress_ } + , dstAddress{ dstAddress_ } + , size{ size_ } { } @@ -18601,10 +18828,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -18703,12 +18930,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {} ) VULKAN_HPP_NOEXCEPT - : srcAddress( srcAddress_ ) - , bufferRowLength( bufferRowLength_ ) - , bufferImageHeight( bufferImageHeight_ ) - , imageSubresource( imageSubresource_ ) - , imageOffset( imageOffset_ ) - , imageExtent( imageExtent_ ) + : srcAddress{ srcAddress_ } + , bufferRowLength{ bufferRowLength_ } + , bufferImageHeight{ bufferImageHeight_ } + , imageSubresource{ imageSubresource_ } + , imageOffset{ 
imageOffset_ } + , imageExtent{ imageExtent_ } { } @@ -18837,13 +19064,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pHostPointer( pHostPointer_ ) - , memoryRowLength( memoryRowLength_ ) - , memoryImageHeight( memoryImageHeight_ ) - , imageSubresource( imageSubresource_ ) - , imageOffset( imageOffset_ ) - , imageExtent( imageExtent_ ) + : pNext{ pNext_ } + , pHostPointer{ pHostPointer_ } + , memoryRowLength{ memoryRowLength_ } + , memoryImageHeight{ memoryImageHeight_ } + , imageSubresource{ imageSubresource_ } + , imageOffset{ imageOffset_ } + , imageExtent{ imageExtent_ } { } @@ -18988,12 +19215,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -19152,10 +19379,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::MicromapEXT dst_ = {}, VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT mode_ = VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -19253,10 +19480,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::MicromapEXT dst_ = {}, VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT mode_ = VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -19371,10 +19598,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT mode_ = VULKAN_HPP_NAMESPACE::CopyMicromapModeEXT::eClone, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , src( src_ ) - , dst( dst_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , src{ src_ } + , dst{ dst_ } + , mode{ mode_ } { } @@ -19470,9 +19697,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CuFunctionCreateInfoNVX( VULKAN_HPP_NAMESPACE::CuModuleNVX module_ = {}, const char * pName_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , module( module_ ) - , pName( pName_ ) + : pNext{ pNext_ } + , module{ module_ } + , pName{ pName_ } { } @@ -19595,19 +19822,19 @@ namespace VULKAN_HPP_NAMESPACE size_t extraCount_ = {}, const void * const * pExtras_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , function( function_ ) - , gridDimX( gridDimX_ ) - , gridDimY( gridDimY_ ) - , gridDimZ( gridDimZ_ ) - , blockDimX( blockDimX_ ) - , blockDimY( blockDimY_ ) - , blockDimZ( blockDimZ_ ) - , sharedMemBytes( sharedMemBytes_ ) - , paramCount( paramCount_ ) - , pParams( pParams_ ) - , extraCount( extraCount_ ) - , pExtras( pExtras_ ) + : pNext{ pNext_ } + , function{ function_ } + , gridDimX{ gridDimX_ } + , gridDimY{ gridDimY_ } + , gridDimZ{ gridDimZ_ } + , 
blockDimX{ blockDimX_ } + , blockDimY{ blockDimY_ } + , blockDimZ{ blockDimZ_ } + , sharedMemBytes{ sharedMemBytes_ } + , paramCount{ paramCount_ } + , pParams{ pParams_ } + , extraCount{ extraCount_ } + , pExtras{ pExtras_ } { } @@ -19840,9 +20067,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CuModuleCreateInfoNVX( size_t dataSize_ = {}, const void * pData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dataSize( dataSize_ ) - , pData( pData_ ) + : pNext{ pNext_ } + , dataSize{ dataSize_ } + , pData{ pData_ } { } @@ -19964,9 +20191,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CudaFunctionCreateInfoNV( VULKAN_HPP_NAMESPACE::CudaModuleNV module_ = {}, const char * pName_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , module( module_ ) - , pName( pName_ ) + : pNext{ pNext_ } + , module{ module_ } + , pName{ pName_ } { } @@ -20091,19 +20318,19 @@ namespace VULKAN_HPP_NAMESPACE size_t extraCount_ = {}, const void * const * pExtras_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , function( function_ ) - , gridDimX( gridDimX_ ) - , gridDimY( gridDimY_ ) - , gridDimZ( gridDimZ_ ) - , blockDimX( blockDimX_ ) - , blockDimY( blockDimY_ ) - , blockDimZ( blockDimZ_ ) - , sharedMemBytes( sharedMemBytes_ ) - , paramCount( paramCount_ ) - , pParams( pParams_ ) - , extraCount( extraCount_ ) - , pExtras( pExtras_ ) + : pNext{ pNext_ } + , function{ function_ } + , gridDimX{ gridDimX_ } + , gridDimY{ gridDimY_ } + , gridDimZ{ gridDimZ_ } + , blockDimX{ blockDimX_ } + , blockDimY{ blockDimY_ } + , blockDimZ{ blockDimZ_ } + , sharedMemBytes{ sharedMemBytes_ } + , paramCount{ paramCount_ } + , pParams{ pParams_ } + , extraCount{ extraCount_ } + , pExtras{ pExtras_ } { } @@ -20338,9 +20565,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR CudaModuleCreateInfoNV( size_t dataSize_ = {}, const void * pData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dataSize( dataSize_ ) - , pData( pData_ ) + : pNext{ pNext_ } + , dataSize{ dataSize_ } + , pData{ pData_ } { } @@ -20466,11 +20693,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreValuesCount_ = {}, const uint64_t * pSignalSemaphoreValues_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreValuesCount( waitSemaphoreValuesCount_ ) - , pWaitSemaphoreValues( pWaitSemaphoreValues_ ) - , signalSemaphoreValuesCount( signalSemaphoreValuesCount_ ) - , pSignalSemaphoreValues( pSignalSemaphoreValues_ ) + : pNext{ pNext_ } + , waitSemaphoreValuesCount{ waitSemaphoreValuesCount_ } + , pWaitSemaphoreValues{ pWaitSemaphoreValues_ } + , signalSemaphoreValuesCount{ signalSemaphoreValuesCount_ } + , pSignalSemaphoreValues{ pSignalSemaphoreValues_ } { } @@ -20628,9 +20855,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR_14 DebugMarkerMarkerInfoEXT( const char * pMarkerName_ = {}, std::array<float, 4> const & color_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pMarkerName( pMarkerName_ ) - , color( color_ ) + : pNext{ pNext_ } + , pMarkerName{ pMarkerName_ } + , color{ color_ } { } @@ -20746,10 +20973,10 @@ namespace VULKAN_HPP_NAMESPACE uint64_t object_ = {}, const char * pObjectName_ = {}, const void * pNext_ = nullptr )
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , objectType( objectType_ ) - , object( object_ ) - , pObjectName( pObjectName_ ) + : pNext{ pNext_ } + , objectType{ objectType_ } + , object{ object_ } + , pObjectName{ pObjectName_ } { } @@ -20880,12 +21107,12 @@ namespace VULKAN_HPP_NAMESPACE size_t tagSize_ = {}, const void * pTag_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , objectType( objectType_ ) - , object( object_ ) - , tagName( tagName_ ) - , tagSize( tagSize_ ) - , pTag( pTag_ ) + : pNext{ pNext_ } + , objectType{ objectType_ } + , object{ object_ } + , tagName{ tagName_ } + , tagSize{ tagSize_ } + , pTag{ pTag_ } { } @@ -21040,10 +21267,10 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkDebugReportCallbackEXT pfnCallback_ = {}, void * pUserData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pfnCallback( pfnCallback_ ) - , pUserData( pUserData_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pfnCallback{ pfnCallback_ } + , pUserData{ pUserData_ } { } @@ -21153,9 +21380,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR_14 DebugUtilsLabelEXT( const char * pLabelName_ = {}, std::array<float, 4> const & color_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pLabelName( pLabelName_ ) - , color( color_ ) + : pNext{ pNext_ } + , pLabelName{ pLabelName_ } + , color{ color_ } { } @@ -21267,10 +21494,10 @@ namespace VULKAN_HPP_NAMESPACE uint64_t objectHandle_ = {}, const char * pObjectName_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , objectType( objectType_ ) - , objectHandle( objectHandle_ ) - , pObjectName( pObjectName_ ) + : pNext{ pNext_ } + , objectType{ objectType_ } + , objectHandle{ objectHandle_ } + , pObjectName{ pObjectName_ } { } @@ -21402,17 +21629,17 @@ namespace VULKAN_HPP_NAMESPACE uint32_t objectCount_ = {}, const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT * pObjects_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pMessageIdName( pMessageIdName_ ) - , messageIdNumber( messageIdNumber_ ) - , pMessage( pMessage_ ) - , queueLabelCount( queueLabelCount_ ) - , pQueueLabels( pQueueLabels_ ) - , cmdBufLabelCount( cmdBufLabelCount_ ) - , pCmdBufLabels( pCmdBufLabels_ ) - , objectCount( objectCount_ ) - , pObjects( pObjects_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pMessageIdName{ pMessageIdName_ } + , messageIdNumber{ messageIdNumber_ } + , pMessage{ pMessage_ } + , queueLabelCount{ queueLabelCount_ } + , pQueueLabels{ pQueueLabels_ } + , cmdBufLabelCount{ cmdBufLabelCount_ } + , pCmdBufLabels{ pCmdBufLabels_ } + , objectCount{ objectCount_ } + , pObjects{ pObjects_ } { } @@ -21676,12 +21903,12 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback_ = {}, void * pUserData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , messageSeverity( messageSeverity_ ) - , messageType( messageType_ ) - , pfnUserCallback( pfnUserCallback_ ) - , pUserData( pUserData_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , messageSeverity{ messageSeverity_ } + , messageType{ messageType_ } + , pfnUserCallback{ pfnUserCallback_ } + , pUserData{ pUserData_ } { } @@ -21814,12 +22041,12 @@ namespace VULKAN_HPP_NAMESPACE size_t tagSize_ = {}, const void * pTag_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , objectType( objectType_ ) -
, objectHandle( objectHandle_ ) - , tagName( tagName_ ) - , tagSize( tagSize_ ) - , pTag( pTag_ ) + : pNext{ pNext_ } + , objectType{ objectType_ } + , objectHandle{ objectHandle_ } + , tagName{ tagName_ } + , tagSize{ tagSize_ } + , pTag{ pTag_ } { } @@ -21977,11 +22204,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize compressedSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize decompressedSize_ = {}, VULKAN_HPP_NAMESPACE::MemoryDecompressionMethodFlagsNV decompressionMethod_ = {} ) VULKAN_HPP_NOEXCEPT - : srcAddress( srcAddress_ ) - , dstAddress( dstAddress_ ) - , compressedSize( compressedSize_ ) - , decompressedSize( decompressedSize_ ) - , decompressionMethod( decompressionMethod_ ) + : srcAddress{ srcAddress_ } + , dstAddress{ dstAddress_ } + , compressedSize{ compressedSize_ } + , decompressedSize{ decompressedSize_ } + , decompressionMethod{ decompressionMethod_ } { } @@ -22097,8 +22324,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DedicatedAllocationBufferCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dedicatedAllocation( dedicatedAllocation_ ) + : pNext{ pNext_ } + , dedicatedAllocation{ dedicatedAllocation_ } { } @@ -22195,8 +22422,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DedicatedAllocationImageCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dedicatedAllocation( dedicatedAllocation_ ) + : pNext{ pNext_ } + , dedicatedAllocation{ dedicatedAllocation_ } { } @@ -22294,9 +22521,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DedicatedAllocationMemoryAllocateInfoNV( VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , image{ image_ } + , buffer{ buffer_ } { } @@ -22402,11 +22629,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineStageFlags2 dstStageMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags2 dstAccessMask_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcStageMask( srcStageMask_ ) - , srcAccessMask( srcAccessMask_ ) - , dstStageMask( dstStageMask_ ) - , dstAccessMask( dstAccessMask_ ) + : pNext{ pNext_ } + , srcStageMask{ srcStageMask_ } + , srcAccessMask{ srcAccessMask_ } + , dstStageMask{ dstStageMask_ } + , dstAccessMask{ dstAccessMask_ } { } @@ -22528,11 +22755,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t levelCount_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {} ) VULKAN_HPP_NOEXCEPT - : aspectMask( aspectMask_ ) - , baseMipLevel( baseMipLevel_ ) - , levelCount( levelCount_ ) - , baseArrayLayer( baseArrayLayer_ ) - , layerCount( layerCount_ ) + : aspectMask{ aspectMask_ } + , baseMipLevel{ baseMipLevel_ } + , levelCount{ levelCount_ } + , baseArrayLayer{ baseArrayLayer_ } + , layerCount{ layerCount_ } { } @@ -22652,17 +22879,17 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcStageMask( srcStageMask_ ) - , srcAccessMask( srcAccessMask_ ) - , dstStageMask( dstStageMask_ ) - , dstAccessMask( dstAccessMask_ ) - , oldLayout( 
oldLayout_ ) - , newLayout( newLayout_ ) - , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) - , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) - , image( image_ ) - , subresourceRange( subresourceRange_ ) + : pNext{ pNext_ } + , srcStageMask{ srcStageMask_ } + , srcAccessMask{ srcAccessMask_ } + , dstStageMask{ dstStageMask_ } + , dstAccessMask{ dstAccessMask_ } + , oldLayout{ oldLayout_ } + , newLayout{ newLayout_ } + , srcQueueFamilyIndex{ srcQueueFamilyIndex_ } + , dstQueueFamilyIndex{ dstQueueFamilyIndex_ } + , image{ image_ } + , subresourceRange{ subresourceRange_ } { } @@ -22854,14 +23081,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t imageMemoryBarrierCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier2 * pImageMemoryBarriers_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dependencyFlags( dependencyFlags_ ) - , memoryBarrierCount( memoryBarrierCount_ ) - , pMemoryBarriers( pMemoryBarriers_ ) - , bufferMemoryBarrierCount( bufferMemoryBarrierCount_ ) - , pBufferMemoryBarriers( pBufferMemoryBarriers_ ) - , imageMemoryBarrierCount( imageMemoryBarrierCount_ ) - , pImageMemoryBarriers( pImageMemoryBarriers_ ) + : pNext{ pNext_ } + , dependencyFlags{ dependencyFlags_ } + , memoryBarrierCount{ memoryBarrierCount_ } + , pMemoryBarriers{ pMemoryBarriers_ } + , bufferMemoryBarrierCount{ bufferMemoryBarrierCount_ } + , pBufferMemoryBarriers{ pBufferMemoryBarriers_ } + , imageMemoryBarrierCount{ imageMemoryBarrierCount_ } + , pImageMemoryBarriers{ pImageMemoryBarriers_ } { } @@ -23069,10 +23296,10 @@ namespace VULKAN_HPP_NAMESPACE float depthBiasClamp_ = {}, float depthBiasSlopeFactor_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthBiasConstantFactor( depthBiasConstantFactor_ ) - , depthBiasClamp( depthBiasClamp_ ) - , depthBiasSlopeFactor( depthBiasSlopeFactor_ ) + : pNext{ pNext_ } + , depthBiasConstantFactor{ depthBiasConstantFactor_ } + , depthBiasClamp{ depthBiasClamp_ } + , depthBiasSlopeFactor{ depthBiasSlopeFactor_ } { } @@ -23182,9 +23409,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DepthBiasRepresentationEXT::eLeastRepresentableValueFormat, VULKAN_HPP_NAMESPACE::Bool32 depthBiasExact_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthBiasRepresentation( depthBiasRepresentation_ ) - , depthBiasExact( depthBiasExact_ ) + : pNext{ pNext_ } + , depthBiasRepresentation{ depthBiasRepresentation_ } + , depthBiasExact{ depthBiasExact_ } { } @@ -23294,10 +23521,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize range_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , address( address_ ) - , range( range_ ) - , format( format_ ) + : pNext{ pNext_ } + , address{ address_ } + , range{ range_ } + , format{ format_ } { } @@ -23411,10 +23638,10 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorBufferBindingInfoEXT( VULKAN_HPP_NAMESPACE::DeviceAddress address_ = {}, VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , address( address_ ) - , usage( usage_ ) + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , address{ address_ } + , usage{ usage_ } { } @@ -23435,7 +23662,7 @@ namespace VULKAN_HPP_NAMESPACE } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 
DescriptorBufferBindingInfoEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DescriptorBufferBindingInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; @@ -23469,7 +23696,7 @@ namespace VULKAN_HPP_NAMESPACE auto # else - std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::DeviceAddress const &, VULKAN_HPP_NAMESPACE::BufferUsageFlags const &> + std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, const void * const &, VULKAN_HPP_NAMESPACE::DeviceAddress const &, VULKAN_HPP_NAMESPACE::BufferUsageFlags const &> # endif @@ -23499,7 +23726,7 @@ namespace VULKAN_HPP_NAMESPACE public: VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorBufferBindingInfoEXT; - void * pNext = {}; + const void * pNext = {}; VULKAN_HPP_NAMESPACE::DeviceAddress address = {}; VULKAN_HPP_NAMESPACE::BufferUsageFlags usage = {}; }; @@ -23519,9 +23746,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorBufferBindingPushDescriptorBufferHandleEXT( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -23544,7 +23771,7 @@ namespace VULKAN_HPP_NAMESPACE } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 DescriptorBufferBindingPushDescriptorBufferHandleEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DescriptorBufferBindingPushDescriptorBufferHandleEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; @@ -23571,7 +23798,7 @@ namespace VULKAN_HPP_NAMESPACE # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Buffer const &> + std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, const void * const &, VULKAN_HPP_NAMESPACE::Buffer const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -23599,7 +23826,7 @@ namespace VULKAN_HPP_NAMESPACE public: VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorBufferBindingPushDescriptorBufferHandleEXT; - void * pNext = {}; + const void * pNext = {}; VULKAN_HPP_NAMESPACE::Buffer buffer = {}; }; @@ -23617,9 +23844,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DescriptorBufferInfo( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize range_ = {} ) VULKAN_HPP_NOEXCEPT - : buffer( buffer_ ) - , offset( offset_ ) - , range( range_ ) + : buffer{ buffer_ } + , offset{ offset_ } + , range{ range_ } { } @@ -23714,9 +23941,9 @@ namespace VULKAN_HPP_NAMESPACE DescriptorImageInfo( VULKAN_HPP_NAMESPACE::Sampler sampler_ = {}, VULKAN_HPP_NAMESPACE::ImageView imageView_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined ) VULKAN_HPP_NOEXCEPT - : sampler( sampler_ ) - , imageView( imageView_ ) - , imageLayout( imageLayout_ ) + : sampler{ sampler_ } + , imageView{ imageView_ } + , imageLayout{ imageLayout_ } { } @@ -23933,9 +24160,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 DescriptorGetInfoEXT( VULKAN_HPP_NAMESPACE::DescriptorType type_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, VULKAN_HPP_NAMESPACE::DescriptorDataEXT data_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , data( data_ ) + : pNext{ pNext_ } + , type{ type_ } + , data{ data_ } { } @@ -24020,8 +24247,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorPoolSize( VULKAN_HPP_NAMESPACE::DescriptorType type_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, uint32_t descriptorCount_ = {} ) VULKAN_HPP_NOEXCEPT - : type( type_ ) - , descriptorCount( descriptorCount_ ) + : type{ type_ } + , descriptorCount{ descriptorCount_ } { } @@ -24110,11
+24337,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t poolSizeCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorPoolSize * pPoolSizes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , maxSets( maxSets_ ) - , poolSizeCount( poolSizeCount_ ) - , pPoolSizes( pPoolSizes_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , maxSets{ maxSets_ } + , poolSizeCount{ poolSizeCount_ } + , pPoolSizes{ pPoolSizes_ } { } @@ -24257,8 +24484,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorPoolInlineUniformBlockCreateInfo( uint32_t maxInlineUniformBlockBindings_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxInlineUniformBlockBindings( maxInlineUniformBlockBindings_ ) + : pNext{ pNext_ } + , maxInlineUniformBlockBindings{ maxInlineUniformBlockBindings_ } { } @@ -24359,10 +24586,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t descriptorSetCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorSetLayout * pSetLayouts_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorPool( descriptorPool_ ) - , descriptorSetCount( descriptorSetCount_ ) - , pSetLayouts( pSetLayouts_ ) + : pNext{ pNext_ } + , descriptorPool{ descriptorPool_ } + , descriptorSetCount{ descriptorSetCount_ } + , pSetLayouts{ pSetLayouts_ } { } @@ -24497,9 +24724,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DescriptorSetBindingReferenceVALVE( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout_ = {}, uint32_t binding_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorSetLayout( descriptorSetLayout_ ) - , binding( binding_ ) + : pNext{ pNext_ } + , descriptorSetLayout{ descriptorSetLayout_ } + , binding{ binding_ } { } @@ -24603,11 +24830,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t descriptorCount_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, const VULKAN_HPP_NAMESPACE::Sampler * pImmutableSamplers_ = {} ) VULKAN_HPP_NOEXCEPT - : binding( binding_ ) - , descriptorType( descriptorType_ ) - , descriptorCount( descriptorCount_ ) - , stageFlags( stageFlags_ ) - , pImmutableSamplers( pImmutableSamplers_ ) + : binding{ binding_ } + , descriptorType{ descriptorType_ } + , descriptorCount{ descriptorCount_ } + , stageFlags{ stageFlags_ } + , pImmutableSamplers{ pImmutableSamplers_ } { } @@ -24747,9 +24974,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DescriptorSetLayoutBindingFlagsCreateInfo( uint32_t bindingCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorBindingFlags * pBindingFlags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , bindingCount( bindingCount_ ) - , pBindingFlags( pBindingFlags_ ) + : pNext{ pNext_ } + , bindingCount{ bindingCount_ } + , pBindingFlags{ pBindingFlags_ } { } @@ -24876,10 +25103,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t bindingCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutBinding * pBindings_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , bindingCount( bindingCount_ ) - , pBindings( pBindings_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , bindingCount{ bindingCount_ } + , pBindings{ pBindings_ } { } @@ -25014,9 +25241,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorSetLayoutHostMappingInfoVALVE( size_t descriptorOffset_ = {}, uint32_t descriptorSize_ = {}, void * pNext_ = 
nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorOffset( descriptorOffset_ ) - , descriptorSize( descriptorSize_ ) + : pNext{ pNext_ } + , descriptorOffset{ descriptorOffset_ } + , descriptorSize{ descriptorSize_ } { } @@ -25118,8 +25345,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorSetLayoutSupport( VULKAN_HPP_NAMESPACE::Bool32 supported_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , supported( supported_ ) + : pNext{ pNext_ } + , supported{ supported_ } { } @@ -25204,9 +25431,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountAllocateInfo( uint32_t descriptorSetCount_ = {}, const uint32_t * pDescriptorCounts_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorSetCount( descriptorSetCount_ ) - , pDescriptorCounts( pDescriptorCounts_ ) + : pNext{ pNext_ } + , descriptorSetCount{ descriptorSetCount_ } + , pDescriptorCounts{ pDescriptorCounts_ } { } @@ -25331,8 +25558,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountLayoutSupport( uint32_t maxVariableDescriptorCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxVariableDescriptorCount( maxVariableDescriptorCount_ ) + : pNext{ pNext_ } + , maxVariableDescriptorCount{ maxVariableDescriptorCount_ } { } @@ -25419,12 +25646,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, size_t offset_ = {}, size_t stride_ = {} ) VULKAN_HPP_NOEXCEPT - : dstBinding( dstBinding_ ) - , dstArrayElement( dstArrayElement_ ) - , descriptorCount( descriptorCount_ ) - , descriptorType( descriptorType_ ) - , offset( offset_ ) - , stride( stride_ ) + : dstBinding{ dstBinding_ } + , dstArrayElement{ dstArrayElement_ } + , descriptorCount{ descriptorCount_ } + , descriptorType{ descriptorType_ } + , offset{ offset_ } + , stride{ stride_ } { } @@ -25552,15 +25779,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout_ = {}, uint32_t set_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , descriptorUpdateEntryCount( descriptorUpdateEntryCount_ ) - , pDescriptorUpdateEntries( pDescriptorUpdateEntries_ ) - , templateType( templateType_ ) - , descriptorSetLayout( descriptorSetLayout_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , pipelineLayout( pipelineLayout_ ) - , set( set_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , descriptorUpdateEntryCount{ descriptorUpdateEntryCount_ } + , pDescriptorUpdateEntries{ pDescriptorUpdateEntries_ } + , templateType{ templateType_ } + , descriptorSetLayout{ descriptorSetLayout_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , pipelineLayout{ pipelineLayout_ } + , set{ set_ } { } @@ -25763,11 +25990,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceAddressBindingTypeEXT bindingType_ = VULKAN_HPP_NAMESPACE::DeviceAddressBindingTypeEXT::eBind, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , baseAddress( baseAddress_ ) - , size( size_ ) - , bindingType( bindingType_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , baseAddress{ baseAddress_ } + , size{ size_ } + , bindingType{ bindingType_ } { } @@ -25891,8 +26118,8 @@ namespace VULKAN_HPP_NAMESPACE #if 
!defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceBufferMemoryRequirements( const VULKAN_HPP_NAMESPACE::BufferCreateInfo * pCreateInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pCreateInfo( pCreateInfo_ ) + : pNext{ pNext_ } + , pCreateInfo{ pCreateInfo_ } { } @@ -25993,11 +26220,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queueCount_ = {}, const float * pQueuePriorities_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , queueFamilyIndex( queueFamilyIndex_ ) - , queueCount( queueCount_ ) - , pQueuePriorities( pQueuePriorities_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , queueFamilyIndex{ queueFamilyIndex_ } + , queueCount{ queueCount_ } + , pQueuePriorities{ pQueuePriorities_ } { } @@ -26193,61 +26420,61 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyAliased_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variableMultisampleRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 inheritedQueries_ = {} ) VULKAN_HPP_NOEXCEPT - : robustBufferAccess( robustBufferAccess_ ) - , fullDrawIndexUint32( fullDrawIndexUint32_ ) - , imageCubeArray( imageCubeArray_ ) - , independentBlend( independentBlend_ ) - , geometryShader( geometryShader_ ) - , tessellationShader( tessellationShader_ ) - , sampleRateShading( sampleRateShading_ ) - , dualSrcBlend( dualSrcBlend_ ) - , logicOp( logicOp_ ) - , multiDrawIndirect( multiDrawIndirect_ ) - , drawIndirectFirstInstance( drawIndirectFirstInstance_ ) - , depthClamp( depthClamp_ ) - , depthBiasClamp( depthBiasClamp_ ) - , fillModeNonSolid( fillModeNonSolid_ ) - , depthBounds( depthBounds_ ) - , wideLines( wideLines_ ) - , largePoints( largePoints_ ) - , alphaToOne( alphaToOne_ ) - , multiViewport( multiViewport_ ) - , samplerAnisotropy( samplerAnisotropy_ ) - , textureCompressionETC2( textureCompressionETC2_ ) - , textureCompressionASTC_LDR( textureCompressionASTC_LDR_ ) - , textureCompressionBC( textureCompressionBC_ ) - , occlusionQueryPrecise( occlusionQueryPrecise_ ) - , pipelineStatisticsQuery( pipelineStatisticsQuery_ ) - , vertexPipelineStoresAndAtomics( vertexPipelineStoresAndAtomics_ ) - , fragmentStoresAndAtomics( fragmentStoresAndAtomics_ ) - , shaderTessellationAndGeometryPointSize( shaderTessellationAndGeometryPointSize_ ) - , shaderImageGatherExtended( shaderImageGatherExtended_ ) - , shaderStorageImageExtendedFormats( shaderStorageImageExtendedFormats_ ) - , shaderStorageImageMultisample( shaderStorageImageMultisample_ ) - , shaderStorageImageReadWithoutFormat( shaderStorageImageReadWithoutFormat_ ) - , shaderStorageImageWriteWithoutFormat( shaderStorageImageWriteWithoutFormat_ ) - , shaderUniformBufferArrayDynamicIndexing( shaderUniformBufferArrayDynamicIndexing_ ) - , shaderSampledImageArrayDynamicIndexing( shaderSampledImageArrayDynamicIndexing_ ) - , shaderStorageBufferArrayDynamicIndexing( shaderStorageBufferArrayDynamicIndexing_ ) - , shaderStorageImageArrayDynamicIndexing( shaderStorageImageArrayDynamicIndexing_ ) - , shaderClipDistance( shaderClipDistance_ ) - , shaderCullDistance( shaderCullDistance_ ) - , shaderFloat64( shaderFloat64_ ) - , shaderInt64( shaderInt64_ ) - , shaderInt16( shaderInt16_ ) - , shaderResourceResidency( shaderResourceResidency_ ) - , shaderResourceMinLod( shaderResourceMinLod_ ) - , sparseBinding( sparseBinding_ ) - , sparseResidencyBuffer( sparseResidencyBuffer_ ) - , sparseResidencyImage2D( sparseResidencyImage2D_ ) - , sparseResidencyImage3D( sparseResidencyImage3D_ ) - , 
sparseResidency2Samples( sparseResidency2Samples_ ) - , sparseResidency4Samples( sparseResidency4Samples_ ) - , sparseResidency8Samples( sparseResidency8Samples_ ) - , sparseResidency16Samples( sparseResidency16Samples_ ) - , sparseResidencyAliased( sparseResidencyAliased_ ) - , variableMultisampleRate( variableMultisampleRate_ ) - , inheritedQueries( inheritedQueries_ ) + : robustBufferAccess{ robustBufferAccess_ } + , fullDrawIndexUint32{ fullDrawIndexUint32_ } + , imageCubeArray{ imageCubeArray_ } + , independentBlend{ independentBlend_ } + , geometryShader{ geometryShader_ } + , tessellationShader{ tessellationShader_ } + , sampleRateShading{ sampleRateShading_ } + , dualSrcBlend{ dualSrcBlend_ } + , logicOp{ logicOp_ } + , multiDrawIndirect{ multiDrawIndirect_ } + , drawIndirectFirstInstance{ drawIndirectFirstInstance_ } + , depthClamp{ depthClamp_ } + , depthBiasClamp{ depthBiasClamp_ } + , fillModeNonSolid{ fillModeNonSolid_ } + , depthBounds{ depthBounds_ } + , wideLines{ wideLines_ } + , largePoints{ largePoints_ } + , alphaToOne{ alphaToOne_ } + , multiViewport{ multiViewport_ } + , samplerAnisotropy{ samplerAnisotropy_ } + , textureCompressionETC2{ textureCompressionETC2_ } + , textureCompressionASTC_LDR{ textureCompressionASTC_LDR_ } + , textureCompressionBC{ textureCompressionBC_ } + , occlusionQueryPrecise{ occlusionQueryPrecise_ } + , pipelineStatisticsQuery{ pipelineStatisticsQuery_ } + , vertexPipelineStoresAndAtomics{ vertexPipelineStoresAndAtomics_ } + , fragmentStoresAndAtomics{ fragmentStoresAndAtomics_ } + , shaderTessellationAndGeometryPointSize{ shaderTessellationAndGeometryPointSize_ } + , shaderImageGatherExtended{ shaderImageGatherExtended_ } + , shaderStorageImageExtendedFormats{ shaderStorageImageExtendedFormats_ } + , shaderStorageImageMultisample{ shaderStorageImageMultisample_ } + , shaderStorageImageReadWithoutFormat{ shaderStorageImageReadWithoutFormat_ } + , shaderStorageImageWriteWithoutFormat{ shaderStorageImageWriteWithoutFormat_ } + , shaderUniformBufferArrayDynamicIndexing{ shaderUniformBufferArrayDynamicIndexing_ } + , shaderSampledImageArrayDynamicIndexing{ shaderSampledImageArrayDynamicIndexing_ } + , shaderStorageBufferArrayDynamicIndexing{ shaderStorageBufferArrayDynamicIndexing_ } + , shaderStorageImageArrayDynamicIndexing{ shaderStorageImageArrayDynamicIndexing_ } + , shaderClipDistance{ shaderClipDistance_ } + , shaderCullDistance{ shaderCullDistance_ } + , shaderFloat64{ shaderFloat64_ } + , shaderInt64{ shaderInt64_ } + , shaderInt16{ shaderInt16_ } + , shaderResourceResidency{ shaderResourceResidency_ } + , shaderResourceMinLod{ shaderResourceMinLod_ } + , sparseBinding{ sparseBinding_ } + , sparseResidencyBuffer{ sparseResidencyBuffer_ } + , sparseResidencyImage2D{ sparseResidencyImage2D_ } + , sparseResidencyImage3D{ sparseResidencyImage3D_ } + , sparseResidency2Samples{ sparseResidency2Samples_ } + , sparseResidency4Samples{ sparseResidency4Samples_ } + , sparseResidency8Samples{ sparseResidency8Samples_ } + , sparseResidency16Samples{ sparseResidency16Samples_ } + , sparseResidencyAliased{ sparseResidencyAliased_ } + , variableMultisampleRate{ variableMultisampleRate_ } + , inheritedQueries{ inheritedQueries_ } { } @@ -26860,15 +27087,15 @@ namespace VULKAN_HPP_NAMESPACE const char * const * ppEnabledExtensionNames_ = {}, const VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures * pEnabledFeatures_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , queueCreateInfoCount( queueCreateInfoCount_ 
) - , pQueueCreateInfos( pQueueCreateInfos_ ) - , enabledLayerCount( enabledLayerCount_ ) - , ppEnabledLayerNames( ppEnabledLayerNames_ ) - , enabledExtensionCount( enabledExtensionCount_ ) - , ppEnabledExtensionNames( ppEnabledExtensionNames_ ) - , pEnabledFeatures( pEnabledFeatures_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , queueCreateInfoCount{ queueCreateInfoCount_ } + , pQueueCreateInfos{ pQueueCreateInfos_ } + , enabledLayerCount{ enabledLayerCount_ } + , ppEnabledLayerNames{ ppEnabledLayerNames_ } + , enabledExtensionCount{ enabledExtensionCount_ } + , ppEnabledExtensionNames{ ppEnabledExtensionNames_ } + , pEnabledFeatures{ pEnabledFeatures_ } { } @@ -27120,10 +27347,10 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback_ = {}, void * pUserData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pfnUserCallback( pfnUserCallback_ ) - , pUserData( pUserData_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pfnUserCallback{ pfnUserCallback_ } + , pUserData{ pUserData_ } { } @@ -27235,8 +27462,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigCreateInfoNV( VULKAN_HPP_NAMESPACE::DeviceDiagnosticsConfigFlagsNV flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -27332,8 +27559,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceEventInfoEXT( VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT deviceEvent_ = VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT::eDisplayHotplug, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceEvent( deviceEvent_ ) + : pNext{ pNext_ } + , deviceEvent{ deviceEvent_ } { } @@ -27425,9 +27652,9 @@ namespace VULKAN_HPP_NAMESPACE DeviceFaultAddressInfoEXT( VULKAN_HPP_NAMESPACE::DeviceFaultAddressTypeEXT addressType_ = VULKAN_HPP_NAMESPACE::DeviceFaultAddressTypeEXT::eNone, VULKAN_HPP_NAMESPACE::DeviceAddress reportedAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize addressPrecision_ = {} ) VULKAN_HPP_NOEXCEPT - : addressType( addressType_ ) - , reportedAddress( reportedAddress_ ) - , addressPrecision( addressPrecision_ ) + : addressType{ addressType_ } + , reportedAddress{ reportedAddress_ } + , addressPrecision{ addressPrecision_ } { } @@ -27525,10 +27752,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t vendorInfoCount_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vendorBinarySize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , addressInfoCount( addressInfoCount_ ) - , vendorInfoCount( vendorInfoCount_ ) - , vendorBinarySize( vendorBinarySize_ ) + : pNext{ pNext_ } + , addressInfoCount{ addressInfoCount_ } + , vendorInfoCount{ vendorInfoCount_ } + , vendorBinarySize{ vendorBinarySize_ } { } @@ -27637,9 +27864,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 DeviceFaultVendorInfoEXT( std::array<char, VK_MAX_DESCRIPTION_SIZE> const & description_ = {}, uint64_t vendorFaultCode_ = {}, uint64_t vendorFaultData_ = {} ) VULKAN_HPP_NOEXCEPT - : description( description_ ) - , vendorFaultCode( vendorFaultCode_ ) - , vendorFaultData( vendorFaultData_ ) + : description{ description_ } + , vendorFaultCode{ vendorFaultCode_ } + , vendorFaultData{ vendorFaultData_ } { } @@ -27770,11 +27997,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceFaultVendorInfoEXT * pVendorInfos_ = {}, void * pVendorBinaryData_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , description( description_ ) - , pAddressInfos( pAddressInfos_ ) - , pVendorInfos( pVendorInfos_ ) - , pVendorBinaryData( pVendorBinaryData_ ) + : pNext{ pNext_ } + , description{ description_ } + , pAddressInfos{ pAddressInfos_ } + , pVendorInfos{ pVendorInfos_ } + , pVendorBinaryData{ pVendorBinaryData_ } { } @@ -27789,10 +28016,10 @@ namespace VULKAN_HPP_NAMESPACE DeviceFaultInfoEXT & operator=( DeviceFaultInfoEXT const & ) = delete; DeviceFaultInfoEXT( DeviceFaultInfoEXT && rhs ) VULKAN_HPP_NOEXCEPT - : pNext( rhs.pNext ) - , pAddressInfos( rhs.pAddressInfos ) - , pVendorInfos( rhs.pVendorInfos ) - , pVendorBinaryData( rhs.pVendorBinaryData ) + : pNext{ rhs.pNext } + , pAddressInfos{ rhs.pAddressInfos } + , pVendorInfos{ rhs.pVendorInfos } + , pVendorBinaryData{ rhs.pVendorBinaryData } { memcpy( description, rhs.description, VK_MAX_DESCRIPTION_SIZE ); @@ -27923,17 +28150,17 @@ namespace VULKAN_HPP_NAMESPACE uint32_t engineNameOffset_ = {}, uint32_t engineVersion_ = {}, uint32_t apiVersion_ = {} ) VULKAN_HPP_NOEXCEPT - : headerSize( headerSize_ ) - , headerVersion( headerVersion_ ) - , vendorID( vendorID_ ) - , deviceID( deviceID_ ) - , driverVersion( driverVersion_ ) - , pipelineCacheUUID( pipelineCacheUUID_ ) - , applicationNameOffset( applicationNameOffset_ ) - , applicationVersion( applicationVersion_ ) - , engineNameOffset( engineNameOffset_ ) - , engineVersion( engineVersion_ ) - , apiVersion( apiVersion_ ) + : headerSize{ headerSize_ } + , headerVersion{ headerVersion_ } + , vendorID{ vendorID_ } + , deviceID{ deviceID_ } + , driverVersion{ driverVersion_ } + , pipelineCacheUUID{ pipelineCacheUUID_ } + , applicationNameOffset{ applicationNameOffset_ } + , applicationVersion{ applicationVersion_ } + , engineNameOffset{ engineNameOffset_ } + , engineVersion{ engineVersion_ } + , apiVersion{ apiVersion_ } { } @@ -28110,9 +28337,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceGroupBindSparseInfo( uint32_t resourceDeviceIndex_ = {}, uint32_t memoryDeviceIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , resourceDeviceIndex( resourceDeviceIndex_ ) - , memoryDeviceIndex( memoryDeviceIndex_ ) + : pNext{ pNext_ } + , resourceDeviceIndex{ resourceDeviceIndex_ } + , memoryDeviceIndex{ memoryDeviceIndex_ } { } @@ -28217,8 +28444,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceGroupCommandBufferBeginInfo( uint32_t deviceMask_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceMask( deviceMask_ ) + : pNext{ pNext_ } + , deviceMask{ deviceMask_ } { } @@ -28317,9 +28544,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DeviceGroupDeviceCreateInfo( uint32_t physicalDeviceCount_ = {}, const VULKAN_HPP_NAMESPACE::PhysicalDevice * pPhysicalDevices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , physicalDeviceCount( physicalDeviceCount_ ) - , pPhysicalDevices( pPhysicalDevices_ ) + : pNext{ pNext_ } + , physicalDeviceCount{ physicalDeviceCount_ } + , pPhysicalDevices{ pPhysicalDevices_ } { } @@ -28445,9 +28672,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 DeviceGroupPresentCapabilitiesKHR( std::array<uint32_t, VK_MAX_DEVICE_GROUP_SIZE> const & presentMask_ = {}, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentMask( presentMask_ ) - ,
modes( modes_ ) + : pNext{ pNext_ } + , presentMask{ presentMask_ } + , modes{ modes_ } { } @@ -28536,10 +28763,10 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pDeviceMasks_ = {}, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR mode_ = VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR::eLocal, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pDeviceMasks( pDeviceMasks_ ) - , mode( mode_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pDeviceMasks{ pDeviceMasks_ } + , mode{ mode_ } { } @@ -28674,10 +28901,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t deviceRenderAreaCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pDeviceRenderAreas_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceMask( deviceMask_ ) - , deviceRenderAreaCount( deviceRenderAreaCount_ ) - , pDeviceRenderAreas( pDeviceRenderAreas_ ) + : pNext{ pNext_ } + , deviceMask{ deviceMask_ } + , deviceRenderAreaCount{ deviceRenderAreaCount_ } + , pDeviceRenderAreas{ pDeviceRenderAreas_ } { } @@ -28819,13 +29046,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreCount_ = {}, const uint32_t * pSignalSemaphoreDeviceIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreCount( waitSemaphoreCount_ ) - , pWaitSemaphoreDeviceIndices( pWaitSemaphoreDeviceIndices_ ) - , commandBufferCount( commandBufferCount_ ) - , pCommandBufferDeviceMasks( pCommandBufferDeviceMasks_ ) - , signalSemaphoreCount( signalSemaphoreCount_ ) - , pSignalSemaphoreDeviceIndices( pSignalSemaphoreDeviceIndices_ ) + : pNext{ pNext_ } + , waitSemaphoreCount{ waitSemaphoreCount_ } + , pWaitSemaphoreDeviceIndices{ pWaitSemaphoreDeviceIndices_ } + , commandBufferCount{ commandBufferCount_ } + , pCommandBufferDeviceMasks{ pCommandBufferDeviceMasks_ } + , signalSemaphoreCount{ signalSemaphoreCount_ } + , pSignalSemaphoreDeviceIndices{ pSignalSemaphoreDeviceIndices_ } { } @@ -29021,8 +29248,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceGroupSwapchainCreateInfoKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , modes( modes_ ) + : pNext{ pNext_ } + , modes{ modes_ } { } @@ -29130,20 +29357,20 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pQueueFamilyIndices_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , imageType( imageType_ ) - , format( format_ ) - , extent( extent_ ) - , mipLevels( mipLevels_ ) - , arrayLayers( arrayLayers_ ) - , samples( samples_ ) - , tiling( tiling_ ) - , usage( usage_ ) - , sharingMode( sharingMode_ ) - , queueFamilyIndexCount( queueFamilyIndexCount_ ) - , pQueueFamilyIndices( pQueueFamilyIndices_ ) - , initialLayout( initialLayout_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , imageType{ imageType_ } + , format{ format_ } + , extent{ extent_ } + , mipLevels{ mipLevels_ } + , arrayLayers{ arrayLayers_ } + , samples{ samples_ } + , tiling{ tiling_ } + , usage{ usage_ } + , sharingMode{ sharingMode_ } + , queueFamilyIndexCount{ queueFamilyIndexCount_ } + , pQueueFamilyIndices{ pQueueFamilyIndices_ } + , initialLayout{ initialLayout_ } { } @@ -29395,9 +29622,9 @@ namespace VULKAN_HPP_NAMESPACE DeviceImageMemoryRequirements( const 
VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ = {}, VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pCreateInfo( pCreateInfo_ ) - , planeAspect( planeAspect_ ) + : pNext{ pNext_ } + , pCreateInfo{ pCreateInfo_ } + , planeAspect{ planeAspect_ } { } @@ -29504,8 +29731,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageSubresource2KHR( VULKAN_HPP_NAMESPACE::ImageSubresource imageSubresource_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageSubresource( imageSubresource_ ) + : pNext{ pNext_ } + , imageSubresource{ imageSubresource_ } { } @@ -29604,9 +29831,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DeviceImageSubresourceInfoKHR( const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ = {}, const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pCreateInfo( pCreateInfo_ ) - , pSubresource( pSubresource_ ) + : pNext{ pNext_ } + , pCreateInfo{ pCreateInfo_ } + , pSubresource{ pSubresource_ } { } @@ -29713,8 +29940,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceMemoryOpaqueCaptureAddressInfo( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) + : pNext{ pNext_ } + , memory{ memory_ } { } @@ -29813,8 +30040,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DeviceMemoryOverallocationCreateInfoAMD( VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD overallocationBehavior_ = VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD::eDefault, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , overallocationBehavior( overallocationBehavior_ ) + : pNext{ pNext_ } + , overallocationBehavior{ overallocationBehavior_ } { } @@ -29918,14 +30145,14 @@ namespace VULKAN_HPP_NAMESPACE uint64_t objectHandle_ = {}, uint32_t heapIndex_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , type( type_ ) - , memoryObjectId( memoryObjectId_ ) - , size( size_ ) - , objectType( objectType_ ) - , objectHandle( objectHandle_ ) - , heapIndex( heapIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , type{ type_ } + , memoryObjectId{ memoryObjectId_ } + , size{ size_ } + , objectType{ objectType_ } + , objectHandle{ objectHandle_ } + , heapIndex{ heapIndex_ } { } @@ -30057,6 +30284,104 @@ namespace VULKAN_HPP_NAMESPACE }; #endif /*VK_ENABLE_BETA_EXTENSIONS*/ + struct DevicePipelineBinaryInternalCacheControlKHR + { + using NativeType = VkDevicePipelineBinaryInternalCacheControlKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDevicePipelineBinaryInternalCacheControlKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DevicePipelineBinaryInternalCacheControlKHR( VULKAN_HPP_NAMESPACE::Bool32 disableInternalCache_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , disableInternalCache{ disableInternalCache_ } + { + } + + VULKAN_HPP_CONSTEXPR DevicePipelineBinaryInternalCacheControlKHR( DevicePipelineBinaryInternalCacheControlKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + 
DevicePipelineBinaryInternalCacheControlKHR( VkDevicePipelineBinaryInternalCacheControlKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : DevicePipelineBinaryInternalCacheControlKHR( *reinterpret_cast<DevicePipelineBinaryInternalCacheControlKHR const *>( &rhs ) ) + { + } + + DevicePipelineBinaryInternalCacheControlKHR & operator=( DevicePipelineBinaryInternalCacheControlKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + DevicePipelineBinaryInternalCacheControlKHR & operator=( VkDevicePipelineBinaryInternalCacheControlKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DevicePipelineBinaryInternalCacheControlKHR const *>( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 DevicePipelineBinaryInternalCacheControlKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DevicePipelineBinaryInternalCacheControlKHR & + setDisableInternalCache( VULKAN_HPP_NAMESPACE::Bool32 disableInternalCache_ ) VULKAN_HPP_NOEXCEPT + { + disableInternalCache = disableInternalCache_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkDevicePipelineBinaryInternalCacheControlKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkDevicePipelineBinaryInternalCacheControlKHR *>( this ); + } + + operator VkDevicePipelineBinaryInternalCacheControlKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkDevicePipelineBinaryInternalCacheControlKHR *>( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, const void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &> +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, disableInternalCache ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( DevicePipelineBinaryInternalCacheControlKHR const & ) const = default; +#else + bool operator==( DevicePipelineBinaryInternalCacheControlKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( disableInternalCache == rhs.disableInternalCache ); +# endif + } + + bool operator!=( DevicePipelineBinaryInternalCacheControlKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDevicePipelineBinaryInternalCacheControlKHR; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 disableInternalCache = {}; + }; + + template <> + struct CppType<StructureType, StructureType::eDevicePipelineBinaryInternalCacheControlKHR> + { + using Type = DevicePipelineBinaryInternalCacheControlKHR; + }; + struct DevicePrivateDataCreateInfo { using NativeType = VkDevicePrivateDataCreateInfo; @@ -30066,8 +30391,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DevicePrivateDataCreateInfo( uint32_t privateDataSlotRequestCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , privateDataSlotRequestCount( privateDataSlotRequestCount_ ) + : pNext{ pNext_ } + , privateDataSlotRequestCount{ privateDataSlotRequestCount_ } { } @@ -30166,8 +30491,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DeviceQueueGlobalPriorityCreateInfoKHR( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR globalPriority_ = VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , globalPriority( globalPriority_ ) + : pNext{ pNext_ } + , globalPriority{ globalPriority_ } { } @@ -30268,10 +30593,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queueFamilyIndex_ = {}, uint32_t
queueIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , queueFamilyIndex( queueFamilyIndex_ ) - , queueIndex( queueIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , queueFamilyIndex{ queueFamilyIndex_ } + , queueIndex{ queueIndex_ } { } @@ -30382,8 +30707,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DeviceQueueShaderCoreControlCreateInfoARM( uint32_t shaderCoreCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderCoreCount( shaderCoreCount_ ) + : pNext{ pNext_ } + , shaderCoreCount{ shaderCoreCount_ } { } @@ -30480,9 +30805,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DirectDriverLoadingInfoLUNARG( VULKAN_HPP_NAMESPACE::DirectDriverLoadingFlagsLUNARG flags_ = {}, PFN_vkGetInstanceProcAddrLUNARG pfnGetInstanceProcAddr_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pfnGetInstanceProcAddr( pfnGetInstanceProcAddr_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pfnGetInstanceProcAddr{ pfnGetInstanceProcAddr_ } { } @@ -30588,10 +30913,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t driverCount_ = {}, const VULKAN_HPP_NAMESPACE::DirectDriverLoadingInfoLUNARG * pDrivers_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mode( mode_ ) - , driverCount( driverCount_ ) - , pDrivers( pDrivers_ ) + : pNext{ pNext_ } + , mode{ mode_ } + , driverCount{ driverCount_ } + , pDrivers{ pDrivers_ } { } @@ -30724,10 +31049,10 @@ namespace VULKAN_HPP_NAMESPACE IDirectFB * dfb_ = {}, IDirectFBSurface * surface_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , dfb( dfb_ ) - , surface( surface_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , dfb{ dfb_ } + , surface{ surface_ } { } @@ -30841,9 +31166,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 DispatchGraphCountInfoAMDX( uint32_t count_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstAMDX infos_ = {}, uint64_t stride_ = {} ) VULKAN_HPP_NOEXCEPT - : count( count_ ) - , infos( infos_ ) - , stride( stride_ ) + : count{ count_ } + , infos{ infos_ } + , stride{ stride_ } { } @@ -30922,10 +31247,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t payloadCount_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstAMDX payloads_ = {}, uint64_t payloadStride_ = {} ) VULKAN_HPP_NOEXCEPT - : nodeIndex( nodeIndex_ ) - , payloadCount( payloadCount_ ) - , payloads( payloads_ ) - , payloadStride( payloadStride_ ) + : nodeIndex{ nodeIndex_ } + , payloadCount{ payloadCount_ } + , payloads{ payloads_ } + , payloadStride{ payloadStride_ } { } @@ -31007,9 +31332,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DispatchIndirectCommand( uint32_t x_ = {}, uint32_t y_ = {}, uint32_t z_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) - , z( z_ ) + : x{ x_ } + , y{ y_ } + , z{ z_ } { } @@ -31106,8 +31431,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DisplayEventInfoEXT( VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT displayEvent_ = VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT::eFirstPixelOut, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displayEvent( displayEvent_ ) + : pNext{ pNext_ } + , displayEvent{ displayEvent_ } { } @@ -31198,8 +31523,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayModeParametersKHR( 
VULKAN_HPP_NAMESPACE::Extent2D visibleRegion_ = {}, uint32_t refreshRate_ = {} ) VULKAN_HPP_NOEXCEPT - : visibleRegion( visibleRegion_ ) - , refreshRate( refreshRate_ ) + : visibleRegion{ visibleRegion_ } + , refreshRate{ refreshRate_ } { } @@ -31289,9 +31614,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DisplayModeCreateInfoKHR( VULKAN_HPP_NAMESPACE::DisplayModeCreateFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , parameters( parameters_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , parameters{ parameters_ } { } @@ -31394,8 +31719,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode_ = {}, VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters_ = {} ) VULKAN_HPP_NOEXCEPT - : displayMode( displayMode_ ) - , parameters( parameters_ ) + : displayMode{ displayMode_ } + , parameters{ parameters_ } { } @@ -31470,8 +31795,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR displayModeProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displayModeProperties( displayModeProperties_ ) + : pNext{ pNext_ } + , displayModeProperties{ displayModeProperties_ } { } @@ -31553,8 +31878,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayNativeHdrSurfaceCapabilitiesAMD( VULKAN_HPP_NAMESPACE::Bool32 localDimmingSupport_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , localDimmingSupport( localDimmingSupport_ ) + : pNext{ pNext_ } + , localDimmingSupport{ localDimmingSupport_ } { } @@ -31640,15 +31965,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset2D maxDstPosition_ = {}, VULKAN_HPP_NAMESPACE::Extent2D minDstExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxDstExtent_ = {} ) VULKAN_HPP_NOEXCEPT - : supportedAlpha( supportedAlpha_ ) - , minSrcPosition( minSrcPosition_ ) - , maxSrcPosition( maxSrcPosition_ ) - , minSrcExtent( minSrcExtent_ ) - , maxSrcExtent( maxSrcExtent_ ) - , minDstPosition( minDstPosition_ ) - , maxDstPosition( maxDstPosition_ ) - , minDstExtent( minDstExtent_ ) - , maxDstExtent( maxDstExtent_ ) + : supportedAlpha{ supportedAlpha_ } + , minSrcPosition{ minSrcPosition_ } + , maxSrcPosition{ maxSrcPosition_ } + , minSrcExtent{ minSrcExtent_ } + , maxSrcExtent{ maxSrcExtent_ } + , minDstPosition{ minDstPosition_ } + , maxDstPosition{ maxDstPosition_ } + , minDstExtent{ minDstExtent_ } + , maxDstExtent{ maxDstExtent_ } { } @@ -31740,8 +32065,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayPlaneCapabilities2KHR( VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , capabilities( capabilities_ ) + : pNext{ pNext_ } + , capabilities{ capabilities_ } { } @@ -31823,9 +32148,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayPlaneInfo2KHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode_ = {}, uint32_t planeIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mode( mode_ ) - , planeIndex( planeIndex_ ) + : pNext{ pNext_ } 
+ , mode{ mode_ } + , planeIndex{ planeIndex_ } { } @@ -31925,8 +32250,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayPlanePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR currentDisplay_ = {}, uint32_t currentStackIndex_ = {} ) VULKAN_HPP_NOEXCEPT - : currentDisplay( currentDisplay_ ) - , currentStackIndex( currentStackIndex_ ) + : currentDisplay{ currentDisplay_ } + , currentStackIndex{ currentStackIndex_ } { } @@ -32001,8 +32326,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayPlaneProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR displayPlaneProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displayPlaneProperties( displayPlaneProperties_ ) + : pNext{ pNext_ } + , displayPlaneProperties{ displayPlaneProperties_ } { } @@ -32084,8 +32409,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayPowerInfoEXT( VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT powerState_ = VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT::eOff, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , powerState( powerState_ ) + : pNext{ pNext_ } + , powerState{ powerState_ } { } @@ -32182,10 +32507,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Rect2D dstRect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 persistent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcRect( srcRect_ ) - , dstRect( dstRect_ ) - , persistent( persistent_ ) + : pNext{ pNext_ } + , srcRect{ srcRect_ } + , dstRect{ dstRect_ } + , persistent{ persistent_ } { } @@ -32301,13 +32626,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms_ = {}, VULKAN_HPP_NAMESPACE::Bool32 planeReorderPossible_ = {}, VULKAN_HPP_NAMESPACE::Bool32 persistentContent_ = {} ) VULKAN_HPP_NOEXCEPT - : display( display_ ) - , displayName( displayName_ ) - , physicalDimensions( physicalDimensions_ ) - , physicalResolution( physicalResolution_ ) - , supportedTransforms( supportedTransforms_ ) - , planeReorderPossible( planeReorderPossible_ ) - , persistentContent( persistentContent_ ) + : display{ display_ } + , displayName{ displayName_ } + , physicalDimensions{ physicalDimensions_ } + , physicalResolution{ physicalResolution_ } + , supportedTransforms{ supportedTransforms_ } + , planeReorderPossible{ planeReorderPossible_ } + , persistentContent{ persistentContent_ } { } @@ -32411,8 +32736,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DisplayProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR displayProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displayProperties( displayProperties_ ) + : pNext{ pNext_ } + , displayProperties{ displayProperties_ } { } @@ -32502,15 +32827,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR alphaMode_ = VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR::eOpaque, VULKAN_HPP_NAMESPACE::Extent2D imageExtent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , displayMode( displayMode_ ) - , planeIndex( planeIndex_ ) - , planeStackIndex( planeStackIndex_ ) - , transform( transform_ ) - , globalAlpha( globalAlpha_ ) - , alphaMode( alphaMode_ ) - , imageExtent( imageExtent_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , 
displayMode{ displayMode_ } + , planeIndex{ planeIndex_ } + , planeStackIndex{ planeStackIndex_ } + , transform{ transform_ } + , globalAlpha{ globalAlpha_ } + , alphaMode{ alphaMode_ } + , imageExtent{ imageExtent_ } { } @@ -32666,11 +32991,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t firstIndex_ = {}, int32_t vertexOffset_ = {}, uint32_t firstInstance_ = {} ) VULKAN_HPP_NOEXCEPT - : indexCount( indexCount_ ) - , instanceCount( instanceCount_ ) - , firstIndex( firstIndex_ ) - , vertexOffset( vertexOffset_ ) - , firstInstance( firstInstance_ ) + : indexCount{ indexCount_ } + , instanceCount{ instanceCount_ } + , firstIndex{ firstIndex_ } + , vertexOffset{ vertexOffset_ } + , firstInstance{ firstInstance_ } { } @@ -32780,10 +33105,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t instanceCount_ = {}, uint32_t firstVertex_ = {}, uint32_t firstInstance_ = {} ) VULKAN_HPP_NOEXCEPT - : vertexCount( vertexCount_ ) - , instanceCount( instanceCount_ ) - , firstVertex( firstVertex_ ) - , firstInstance( firstInstance_ ) + : vertexCount{ vertexCount_ } + , instanceCount{ instanceCount_ } + , firstVertex{ firstVertex_ } + , firstInstance{ firstInstance_ } { } @@ -32883,9 +33208,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DrawMeshTasksIndirectCommandEXT( uint32_t groupCountX_ = {}, uint32_t groupCountY_ = {}, uint32_t groupCountZ_ = {} ) VULKAN_HPP_NOEXCEPT - : groupCountX( groupCountX_ ) - , groupCountY( groupCountY_ ) - , groupCountZ( groupCountZ_ ) + : groupCountX{ groupCountX_ } + , groupCountY{ groupCountY_ } + , groupCountZ{ groupCountZ_ } { } @@ -32977,8 +33302,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR DrawMeshTasksIndirectCommandNV( uint32_t taskCount_ = {}, uint32_t firstTask_ = {} ) VULKAN_HPP_NOEXCEPT - : taskCount( taskCount_ ) - , firstTask( firstTask_ ) + : taskCount{ taskCount_ } + , firstTask{ firstTask_ } { } @@ -33065,9 +33390,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DrmFormatModifierProperties2EXT( uint64_t drmFormatModifier_ = {}, uint32_t drmFormatModifierPlaneCount_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags2 drmFormatModifierTilingFeatures_ = {} ) VULKAN_HPP_NOEXCEPT - : drmFormatModifier( drmFormatModifier_ ) - , drmFormatModifierPlaneCount( drmFormatModifierPlaneCount_ ) - , drmFormatModifierTilingFeatures( drmFormatModifierTilingFeatures_ ) + : drmFormatModifier{ drmFormatModifier_ } + , drmFormatModifierPlaneCount{ drmFormatModifierPlaneCount_ } + , drmFormatModifierTilingFeatures{ drmFormatModifierTilingFeatures_ } { } @@ -33142,9 +33467,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesEXT( uint64_t drmFormatModifier_ = {}, uint32_t drmFormatModifierPlaneCount_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags drmFormatModifierTilingFeatures_ = {} ) VULKAN_HPP_NOEXCEPT - : drmFormatModifier( drmFormatModifier_ ) - , drmFormatModifierPlaneCount( drmFormatModifierPlaneCount_ ) - , drmFormatModifierTilingFeatures( drmFormatModifierTilingFeatures_ ) + : drmFormatModifier{ drmFormatModifier_ } + , drmFormatModifierPlaneCount{ drmFormatModifierPlaneCount_ } + , drmFormatModifierTilingFeatures{ drmFormatModifierTilingFeatures_ } { } @@ -33222,9 +33547,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesList2EXT( uint32_t drmFormatModifierCount_ = {}, VULKAN_HPP_NAMESPACE::DrmFormatModifierProperties2EXT * pDrmFormatModifierProperties_ = {}, void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifierCount( drmFormatModifierCount_ ) - , pDrmFormatModifierProperties( pDrmFormatModifierProperties_ ) + : pNext{ pNext_ } + , drmFormatModifierCount{ drmFormatModifierCount_ } + , pDrmFormatModifierProperties{ pDrmFormatModifierProperties_ } { } @@ -33235,17 +33560,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - DrmFormatModifierPropertiesList2EXT( - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<VULKAN_HPP_NAMESPACE::DrmFormatModifierProperties2EXT> const & drmFormatModifierProperties_, - void * pNext_ = nullptr ) - : pNext( pNext_ ) - , drmFormatModifierCount( static_cast<uint32_t>( drmFormatModifierProperties_.size() ) ) - , pDrmFormatModifierProperties( drmFormatModifierProperties_.data() ) - { - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - DrmFormatModifierPropertiesList2EXT & operator=( DrmFormatModifierPropertiesList2EXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -33320,9 +33634,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesListEXT( uint32_t drmFormatModifierCount_ = {}, VULKAN_HPP_NAMESPACE::DrmFormatModifierPropertiesEXT * pDrmFormatModifierProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifierCount( drmFormatModifierCount_ ) - , pDrmFormatModifierProperties( pDrmFormatModifierProperties_ ) + : pNext{ pNext_ } + , drmFormatModifierCount{ drmFormatModifierCount_ } + , pDrmFormatModifierProperties{ pDrmFormatModifierProperties_ } { } @@ -33333,17 +33647,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - DrmFormatModifierPropertiesListEXT( - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<VULKAN_HPP_NAMESPACE::DrmFormatModifierPropertiesEXT> const & drmFormatModifierProperties_, - void * pNext_ = nullptr ) - : pNext( pNext_ ) - , drmFormatModifierCount( static_cast<uint32_t>( drmFormatModifierProperties_.size() ) ) - , pDrmFormatModifierProperties( drmFormatModifierProperties_.data() ) - { - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - DrmFormatModifierPropertiesListEXT & operator=( DrmFormatModifierPropertiesListEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -33416,8 +33719,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR EventCreateInfo( VULKAN_HPP_NAMESPACE::EventCreateFlags flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -33511,9 +33814,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineLibraryCreateInfoKHR( uint32_t libraryCount_ = {}, const VULKAN_HPP_NAMESPACE::Pipeline * pLibraries_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , libraryCount( libraryCount_ ) - , pLibraries( pLibraries_ ) + : pNext{ pNext_ } + , libraryCount{ libraryCount_ } + , pLibraries{ pLibraries_ } { } @@ -33641,14 +33944,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stageCount( stageCount_ ) - , pStages( pStages_ ) - , pLibraryInfo( pLibraryInfo_ ) - , layout( layout_ ) - , basePipelineHandle( basePipelineHandle_ ) - , basePipelineIndex( basePipelineIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stageCount{ stageCount_ } + , pStages{ pStages_ } + , pLibraryInfo{ pLibraryInfo_ } + , layout{ layout_ } + ,
basePipelineHandle{ basePipelineHandle_ } + , basePipelineIndex{ basePipelineIndex_ } { } @@ -33831,8 +34134,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExecutionGraphPipelineScratchSizeAMDX( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , size( size_ ) + : pNext{ pNext_ } + , size{ size_ } { } @@ -33929,8 +34232,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportFenceCreateInfo( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -34031,10 +34334,10 @@ namespace VULKAN_HPP_NAMESPACE DWORD dwAccess_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pAttributes( pAttributes_ ) - , dwAccess( dwAccess_ ) - , name( name_ ) + : pNext{ pNext_ } + , pAttributes{ pAttributes_ } + , dwAccess{ dwAccess_ } + , name{ name_ } { } @@ -34145,8 +34448,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfo( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -34244,8 +34547,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfoNV( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -34345,10 +34648,10 @@ namespace VULKAN_HPP_NAMESPACE DWORD dwAccess_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pAttributes( pAttributes_ ) - , dwAccess( dwAccess_ ) - , name( name_ ) + : pNext{ pNext_ } + , pAttributes{ pAttributes_ } + , dwAccess{ dwAccess_ } + , name{ name_ } { } @@ -34460,9 +34763,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportMemoryWin32HandleInfoNV( const SECURITY_ATTRIBUTES * pAttributes_ = {}, DWORD dwAccess_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pAttributes( pAttributes_ ) - , dwAccess( dwAccess_ ) + : pNext{ pNext_ } + , pAttributes{ pAttributes_ } + , dwAccess{ dwAccess_ } { } @@ -34568,9 +34871,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ExportMetalBufferInfoEXT( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, MTLBuffer_id mtlBuffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) - , mtlBuffer( mtlBuffer_ ) + : pNext{ pNext_ } + , memory{ memory_ } + , mtlBuffer{ mtlBuffer_ } { } @@ -34676,9 +34979,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ExportMetalCommandQueueInfoEXT( VULKAN_HPP_NAMESPACE::Queue queue_ = {}, MTLCommandQueue_id mtlCommandQueue_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queue( queue_ ) - , mtlCommandQueue( mtlCommandQueue_ ) + : pNext{ pNext_ } + , queue{ queue_ } + , mtlCommandQueue{ mtlCommandQueue_ } { } @@ -34782,8 +35085,8 @@ namespace VULKAN_HPP_NAMESPACE # if 
!defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportMetalDeviceInfoEXT( MTLDevice_id mtlDevice_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mtlDevice( mtlDevice_ ) + : pNext{ pNext_ } + , mtlDevice{ mtlDevice_ } { } @@ -34881,9 +35184,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportMetalIOSurfaceInfoEXT( VULKAN_HPP_NAMESPACE::Image image_ = {}, IOSurfaceRef ioSurface_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , ioSurface( ioSurface_ ) + : pNext{ pNext_ } + , image{ image_ } + , ioSurface{ ioSurface_ } { } @@ -34989,8 +35292,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ExportMetalObjectCreateInfoEXT( VULKAN_HPP_NAMESPACE::ExportMetalObjectTypeFlagBitsEXT exportObjectType_ = VULKAN_HPP_NAMESPACE::ExportMetalObjectTypeFlagBitsEXT::eMetalDevice, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , exportObjectType( exportObjectType_ ) + : pNext{ pNext_ } + , exportObjectType{ exportObjectType_ } { } @@ -35087,7 +35390,7 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportMetalObjectsInfoEXT; # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR ExportMetalObjectsInfoEXT( const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext( pNext_ ) {} + VULKAN_HPP_CONSTEXPR ExportMetalObjectsInfoEXT( const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } {} VULKAN_HPP_CONSTEXPR ExportMetalObjectsInfoEXT( ExportMetalObjectsInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -35178,10 +35481,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Event event_ = {}, MTLSharedEvent_id mtlSharedEvent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , event( event_ ) - , mtlSharedEvent( mtlSharedEvent_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , event{ event_ } + , mtlSharedEvent{ mtlSharedEvent_ } { } @@ -35302,12 +35605,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageAspectFlagBits plane_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor, MTLTexture_id mtlTexture_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , imageView( imageView_ ) - , bufferView( bufferView_ ) - , plane( plane_ ) - , mtlTexture( mtlTexture_ ) + : pNext{ pNext_ } + , image{ image_ } + , imageView{ imageView_ } + , bufferView{ bufferView_ } + , plane{ plane_ } + , mtlTexture{ mtlTexture_ } { } @@ -35439,8 +35742,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExportSemaphoreCreateInfo( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -35542,10 +35845,10 @@ namespace VULKAN_HPP_NAMESPACE DWORD dwAccess_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pAttributes( pAttributes_ ) - , dwAccess( dwAccess_ ) - , name( name_ ) + : pNext{ pNext_ } + , pAttributes{ pAttributes_ } + , dwAccess{ dwAccess_ } + , name{ name_ } { } @@ -35653,8 +35956,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR_14 ExtensionProperties( std::array 
<char, VK_MAX_EXTENSION_NAME_SIZE> const & extensionName_ = {}, uint32_t specVersion_ = {} ) VULKAN_HPP_NOEXCEPT - : extensionName( extensionName_ ) - , specVersion( specVersion_ ) + : extensionName{ extensionName_ } + , specVersion{ specVersion_ } { } @@ -35664,18 +35967,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - ExtensionProperties( std::string const & extensionName_, uint32_t specVersion_ = {} ) : specVersion( specVersion_ ) - { - VULKAN_HPP_ASSERT( extensionName_.size() < VK_MAX_EXTENSION_NAME_SIZE ); -# if defined( WIN32 ) - strncpy_s( extensionName, VK_MAX_EXTENSION_NAME_SIZE, extensionName_.data(), extensionName_.size() ); -# else - strncpy( extensionName, extensionName_.data(), std::min<size_t>( VK_MAX_EXTENSION_NAME_SIZE, extensionName_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - ExtensionProperties & operator=( ExtensionProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -35742,9 +36033,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ExternalMemoryProperties( VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlags externalMemoryFeatures_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags compatibleHandleTypes_ = {} ) VULKAN_HPP_NOEXCEPT - : externalMemoryFeatures( externalMemoryFeatures_ ) - , exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ) - , compatibleHandleTypes( compatibleHandleTypes_ ) + : externalMemoryFeatures{ externalMemoryFeatures_ } + , exportFromImportedHandleTypes{ exportFromImportedHandleTypes_ } + , compatibleHandleTypes{ compatibleHandleTypes_ } { } @@ -35825,8 +36116,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalBufferProperties( VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalMemoryProperties( externalMemoryProperties_ ) + : pNext{ pNext_ } + , externalMemoryProperties{ externalMemoryProperties_ } { } @@ -35912,10 +36203,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags compatibleHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlags externalFenceFeatures_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ) - , compatibleHandleTypes( compatibleHandleTypes_ ) - , externalFenceFeatures( externalFenceFeatures_ ) + : pNext{ pNext_ } + , exportFromImportedHandleTypes{ exportFromImportedHandleTypes_ } + , compatibleHandleTypes{ compatibleHandleTypes_ } + , externalFenceFeatures{ externalFenceFeatures_ } { } @@ -36006,8 +36297,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalFormatANDROID( uint64_t externalFormat_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalFormat( externalFormat_ ) + : pNext{ pNext_ } + , externalFormat{ externalFormat_ } { } @@ -36104,8 +36395,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalFormatQNX( uint64_t externalFormat_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalFormat( externalFormat_ ) + : pNext{ pNext_ } + , externalFormat{ externalFormat_ } { } @@ -36199,8 +36490,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined(
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalImageFormatProperties( VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalMemoryProperties( externalMemoryProperties_ ) + : pNext{ pNext_ } + , externalMemoryProperties{ externalMemoryProperties_ } { } @@ -36284,11 +36575,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxArrayLayers_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxResourceSize_ = {} ) VULKAN_HPP_NOEXCEPT - : maxExtent( maxExtent_ ) - , maxMipLevels( maxMipLevels_ ) - , maxArrayLayers( maxArrayLayers_ ) - , sampleCounts( sampleCounts_ ) - , maxResourceSize( maxResourceSize_ ) + : maxExtent{ maxExtent_ } + , maxMipLevels{ maxMipLevels_ } + , maxArrayLayers{ maxArrayLayers_ } + , sampleCounts{ sampleCounts_ } + , maxResourceSize{ maxResourceSize_ } { } @@ -36371,10 +36662,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagsNV externalMemoryFeatures_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV compatibleHandleTypes_ = {} ) VULKAN_HPP_NOEXCEPT - : imageFormatProperties( imageFormatProperties_ ) - , externalMemoryFeatures( externalMemoryFeatures_ ) - , exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ) - , compatibleHandleTypes( compatibleHandleTypes_ ) + : imageFormatProperties{ imageFormatProperties_ } + , externalMemoryFeatures{ externalMemoryFeatures_ } + , exportFromImportedHandleTypes{ exportFromImportedHandleTypes_ } + , compatibleHandleTypes{ compatibleHandleTypes_ } { } @@ -36455,8 +36746,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalMemoryAcquireUnmodifiedEXT( VULKAN_HPP_NAMESPACE::Bool32 acquireUnmodifiedMemory_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , acquireUnmodifiedMemory( acquireUnmodifiedMemory_ ) + : pNext{ pNext_ } + , acquireUnmodifiedMemory{ acquireUnmodifiedMemory_ } { } @@ -36553,8 +36844,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalMemoryBufferCreateInfo( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -36653,8 +36944,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfo( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -36753,8 +37044,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfoNV( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleTypes( handleTypes_ ) + : pNext{ pNext_ } + , handleTypes{ handleTypes_ } { } @@ -36853,10 +37144,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags compatibleHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlags 
externalSemaphoreFeatures_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ) - , compatibleHandleTypes( compatibleHandleTypes_ ) - , externalSemaphoreFeatures( externalSemaphoreFeatures_ ) + : pNext{ pNext_ } + , exportFromImportedHandleTypes{ exportFromImportedHandleTypes_ } + , compatibleHandleTypes{ compatibleHandleTypes_ } + , externalSemaphoreFeatures{ externalSemaphoreFeatures_ } { } @@ -36946,8 +37237,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR FenceCreateInfo( VULKAN_HPP_NAMESPACE::FenceCreateFlags flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -37042,9 +37333,9 @@ namespace VULKAN_HPP_NAMESPACE FenceGetFdInfoKHR( VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fence( fence_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , fence{ fence_ } + , handleType{ handleType_ } { } @@ -37150,9 +37441,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fence( fence_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , fence{ fence_ } + , handleType{ handleType_ } { } @@ -37260,9 +37551,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR FilterCubicImageViewImageFormatPropertiesEXT( VULKAN_HPP_NAMESPACE::Bool32 filterCubic_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterCubicMinmax_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , filterCubic( filterCubic_ ) - , filterCubicMinmax( filterCubicMinmax_ ) + : pNext{ pNext_ } + , filterCubic{ filterCubic_ } + , filterCubicMinmax{ filterCubicMinmax_ } { } @@ -37343,9 +37634,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR FormatProperties( VULKAN_HPP_NAMESPACE::FormatFeatureFlags linearTilingFeatures_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags optimalTilingFeatures_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags bufferFeatures_ = {} ) VULKAN_HPP_NOEXCEPT - : linearTilingFeatures( linearTilingFeatures_ ) - , optimalTilingFeatures( optimalTilingFeatures_ ) - , bufferFeatures( bufferFeatures_ ) + : linearTilingFeatures{ linearTilingFeatures_ } + , optimalTilingFeatures{ optimalTilingFeatures_ } + , bufferFeatures{ bufferFeatures_ } { } @@ -37420,8 +37711,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR FormatProperties2( VULKAN_HPP_NAMESPACE::FormatProperties formatProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , formatProperties( formatProperties_ ) + : pNext{ pNext_ } + , formatProperties{ formatProperties_ } { } @@ -37504,10 +37795,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::FormatFeatureFlags2 optimalTilingFeatures_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags2 bufferFeatures_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , linearTilingFeatures( linearTilingFeatures_ ) - , optimalTilingFeatures( optimalTilingFeatures_ ) - , bufferFeatures( bufferFeatures_ ) + : pNext{ pNext_ } + 
, linearTilingFeatures{ linearTilingFeatures_ } + , optimalTilingFeatures{ optimalTilingFeatures_ } + , bufferFeatures{ bufferFeatures_ } { } @@ -37596,9 +37887,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR FragmentShadingRateAttachmentInfoKHR( const VULKAN_HPP_NAMESPACE::AttachmentReference2 * pFragmentShadingRateAttachment_ = {}, VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pFragmentShadingRateAttachment( pFragmentShadingRateAttachment_ ) - , shadingRateAttachmentTexelSize( shadingRateAttachmentTexelSize_ ) + : pNext{ pNext_ } + , pFragmentShadingRateAttachment{ pFragmentShadingRateAttachment_ } + , shadingRateAttachmentTexelSize{ shadingRateAttachmentTexelSize_ } { } @@ -37715,16 +38006,16 @@ namespace VULKAN_HPP_NAMESPACE size_t tagSize_ = {}, const void * pTag_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , frameID( frameID_ ) - , imageCount( imageCount_ ) - , pImages( pImages_ ) - , bufferCount( bufferCount_ ) - , pBuffers( pBuffers_ ) - , tagName( tagName_ ) - , tagSize( tagSize_ ) - , pTag( pTag_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , frameID{ frameID_ } + , imageCount{ imageCount_ } + , pImages{ pImages_ } + , bufferCount{ bufferCount_ } + , pBuffers{ pBuffers_ } + , tagName{ tagName_ } + , tagSize{ tagSize_ } + , pTag{ pTag_ } { } @@ -37942,14 +38233,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t viewFormatCount_ = {}, const VULKAN_HPP_NAMESPACE::Format * pViewFormats_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , usage( usage_ ) - , width( width_ ) - , height( height_ ) - , layerCount( layerCount_ ) - , viewFormatCount( viewFormatCount_ ) - , pViewFormats( pViewFormats_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , usage{ usage_ } + , width{ width_ } + , height{ height_ } + , layerCount{ layerCount_ } + , viewFormatCount{ viewFormatCount_ } + , pViewFormats{ pViewFormats_ } { } @@ -38129,9 +38420,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR FramebufferAttachmentsCreateInfo( uint32_t attachmentImageInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::FramebufferAttachmentImageInfo * pAttachmentImageInfos_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachmentImageInfoCount( attachmentImageInfoCount_ ) - , pAttachmentImageInfos( pAttachmentImageInfos_ ) + : pNext{ pNext_ } + , attachmentImageInfoCount{ attachmentImageInfoCount_ } + , pAttachmentImageInfos{ pAttachmentImageInfos_ } { } @@ -38269,14 +38560,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t height_ = {}, uint32_t layers_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , renderPass( renderPass_ ) - , attachmentCount( attachmentCount_ ) - , pAttachments( pAttachments_ ) - , width( width_ ) - , height( height_ ) - , layers( layers_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , renderPass{ renderPass_ } + , attachmentCount{ attachmentCount_ } + , pAttachments{ pAttachments_ } + , width{ width_ } + , height{ height_ } + , layers{ layers_ } { } @@ -38458,11 +38749,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::SampleCountFlags depthStencilSamples_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags colorSamples_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , coverageReductionMode( coverageReductionMode_ ) - , rasterizationSamples( rasterizationSamples_ ) - , depthStencilSamples( 
depthStencilSamples_ ) - , colorSamples( colorSamples_ ) + : pNext{ pNext_ } + , coverageReductionMode{ coverageReductionMode_ } + , rasterizationSamples{ rasterizationSamples_ } + , depthStencilSamples{ depthStencilSamples_ } + , colorSamples{ colorSamples_ } { } @@ -38550,8 +38841,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR IndirectCommandsStreamNV( VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {} ) VULKAN_HPP_NOEXCEPT - : buffer( buffer_ ) - , offset( offset_ ) + : buffer{ buffer_ } + , offset{ offset_ } { } @@ -38653,20 +38944,20 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Buffer sequencesIndexBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sequencesIndexOffset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , pipeline( pipeline_ ) - , indirectCommandsLayout( indirectCommandsLayout_ ) - , streamCount( streamCount_ ) - , pStreams( pStreams_ ) - , sequencesCount( sequencesCount_ ) - , preprocessBuffer( preprocessBuffer_ ) - , preprocessOffset( preprocessOffset_ ) - , preprocessSize( preprocessSize_ ) - , sequencesCountBuffer( sequencesCountBuffer_ ) - , sequencesCountOffset( sequencesCountOffset_ ) - , sequencesIndexBuffer( sequencesIndexBuffer_ ) - , sequencesIndexOffset( sequencesIndexOffset_ ) + : pNext{ pNext_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , pipeline{ pipeline_ } + , indirectCommandsLayout{ indirectCommandsLayout_ } + , streamCount{ streamCount_ } + , pStreams{ pStreams_ } + , sequencesCount{ sequencesCount_ } + , preprocessBuffer{ preprocessBuffer_ } + , preprocessOffset{ preprocessOffset_ } + , preprocessSize{ preprocessSize_ } + , sequencesCountBuffer{ sequencesCountBuffer_ } + , sequencesCountOffset{ sequencesCountOffset_ } + , sequencesIndexBuffer{ sequencesIndexBuffer_ } + , sequencesIndexOffset{ sequencesIndexOffset_ } { } @@ -38926,11 +39217,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_ = {}, uint32_t maxSequencesCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , pipeline( pipeline_ ) - , indirectCommandsLayout( indirectCommandsLayout_ ) - , maxSequencesCount( maxSequencesCount_ ) + : pNext{ pNext_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , pipeline{ pipeline_ } + , indirectCommandsLayout{ indirectCommandsLayout_ } + , maxSequencesCount{ maxSequencesCount_ } { } @@ -39068,21 +39359,21 @@ namespace VULKAN_HPP_NAMESPACE uint64_t gpuRenderStartTimeUs_ = {}, uint64_t gpuRenderEndTimeUs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentID( presentID_ ) - , inputSampleTimeUs( inputSampleTimeUs_ ) - , simStartTimeUs( simStartTimeUs_ ) - , simEndTimeUs( simEndTimeUs_ ) - , renderSubmitStartTimeUs( renderSubmitStartTimeUs_ ) - , renderSubmitEndTimeUs( renderSubmitEndTimeUs_ ) - , presentStartTimeUs( presentStartTimeUs_ ) - , presentEndTimeUs( presentEndTimeUs_ ) - , driverStartTimeUs( driverStartTimeUs_ ) - , driverEndTimeUs( driverEndTimeUs_ ) - , osRenderQueueStartTimeUs( osRenderQueueStartTimeUs_ ) - , osRenderQueueEndTimeUs( osRenderQueueEndTimeUs_ ) - , gpuRenderStartTimeUs( gpuRenderStartTimeUs_ ) - , gpuRenderEndTimeUs( gpuRenderEndTimeUs_ ) + : pNext{ pNext_ } + , presentID{ presentID_ } + , inputSampleTimeUs{ inputSampleTimeUs_ } + , simStartTimeUs{ simStartTimeUs_ } + , 
simEndTimeUs{ simEndTimeUs_ } + , renderSubmitStartTimeUs{ renderSubmitStartTimeUs_ } + , renderSubmitEndTimeUs{ renderSubmitEndTimeUs_ } + , presentStartTimeUs{ presentStartTimeUs_ } + , presentEndTimeUs{ presentEndTimeUs_ } + , driverStartTimeUs{ driverStartTimeUs_ } + , driverEndTimeUs{ driverEndTimeUs_ } + , osRenderQueueStartTimeUs{ osRenderQueueStartTimeUs_ } + , osRenderQueueEndTimeUs{ osRenderQueueEndTimeUs_ } + , gpuRenderStartTimeUs{ gpuRenderStartTimeUs_ } + , gpuRenderEndTimeUs{ gpuRenderEndTimeUs_ } { } @@ -39213,9 +39504,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR GetLatencyMarkerInfoNV( uint32_t timingCount_ = {}, VULKAN_HPP_NAMESPACE::LatencyTimingsFrameReportNV * pTimings_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , timingCount( timingCount_ ) - , pTimings( pTimings_ ) + : pNext{ pNext_ } + , timingCount{ timingCount_ } + , pTimings{ pTimings_ } { } @@ -39335,9 +39626,9 @@ namespace VULKAN_HPP_NAMESPACE VertexInputBindingDescription( uint32_t binding_ = {}, uint32_t stride_ = {}, VULKAN_HPP_NAMESPACE::VertexInputRate inputRate_ = VULKAN_HPP_NAMESPACE::VertexInputRate::eVertex ) VULKAN_HPP_NOEXCEPT - : binding( binding_ ) - , stride( stride_ ) - , inputRate( inputRate_ ) + : binding{ binding_ } + , stride{ stride_ } + , inputRate{ inputRate_ } { } @@ -39432,10 +39723,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t binding_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, uint32_t offset_ = {} ) VULKAN_HPP_NOEXCEPT - : location( location_ ) - , binding( binding_ ) - , format( format_ ) - , offset( offset_ ) + : location{ location_ } + , binding{ binding_ } + , format{ format_ } + , offset{ offset_ } { } @@ -39542,12 +39833,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t vertexAttributeDescriptionCount_ = {}, const VULKAN_HPP_NAMESPACE::VertexInputAttributeDescription * pVertexAttributeDescriptions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , vertexBindingDescriptionCount( vertexBindingDescriptionCount_ ) - , pVertexBindingDescriptions( pVertexBindingDescriptions_ ) - , vertexAttributeDescriptionCount( vertexAttributeDescriptionCount_ ) - , pVertexAttributeDescriptions( pVertexAttributeDescriptions_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , vertexBindingDescriptionCount{ vertexBindingDescriptionCount_ } + , pVertexBindingDescriptions{ pVertexBindingDescriptions_ } + , vertexAttributeDescriptionCount{ vertexAttributeDescriptionCount_ } + , pVertexAttributeDescriptions{ pVertexAttributeDescriptions_ } { } @@ -39725,10 +40016,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PrimitiveTopology topology_ = VULKAN_HPP_NAMESPACE::PrimitiveTopology::ePointList, VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , topology( topology_ ) - , primitiveRestartEnable( primitiveRestartEnable_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , topology{ topology_ } + , primitiveRestartEnable{ primitiveRestartEnable_ } { } @@ -39846,9 +40137,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineTessellationStateCreateInfo( VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateFlags flags_ = {}, uint32_t patchControlPoints_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , patchControlPoints( patchControlPoints_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , patchControlPoints{ 
patchControlPoints_ } { } @@ -39959,12 +40250,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t scissorCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pScissors_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , viewportCount( viewportCount_ ) - , pViewports( pViewports_ ) - , scissorCount( scissorCount_ ) - , pScissors( pScissors_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , viewportCount{ viewportCount_ } + , pViewports{ pViewports_ } + , scissorCount{ scissorCount_ } + , pScissors{ pScissors_ } { } @@ -40140,18 +40431,18 @@ namespace VULKAN_HPP_NAMESPACE float depthBiasSlopeFactor_ = {}, float lineWidth_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , depthClampEnable( depthClampEnable_ ) - , rasterizerDiscardEnable( rasterizerDiscardEnable_ ) - , polygonMode( polygonMode_ ) - , cullMode( cullMode_ ) - , frontFace( frontFace_ ) - , depthBiasEnable( depthBiasEnable_ ) - , depthBiasConstantFactor( depthBiasConstantFactor_ ) - , depthBiasClamp( depthBiasClamp_ ) - , depthBiasSlopeFactor( depthBiasSlopeFactor_ ) - , lineWidth( lineWidth_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , depthClampEnable{ depthClampEnable_ } + , rasterizerDiscardEnable{ rasterizerDiscardEnable_ } + , polygonMode{ polygonMode_ } + , cullMode{ cullMode_ } + , frontFace{ frontFace_ } + , depthBiasEnable{ depthBiasEnable_ } + , depthBiasConstantFactor{ depthBiasConstantFactor_ } + , depthBiasClamp{ depthBiasClamp_ } + , depthBiasSlopeFactor{ depthBiasSlopeFactor_ } + , lineWidth{ lineWidth_ } { } @@ -40353,14 +40644,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 alphaToCoverageEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 alphaToOneEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , rasterizationSamples( rasterizationSamples_ ) - , sampleShadingEnable( sampleShadingEnable_ ) - , minSampleShading( minSampleShading_ ) - , pSampleMask( pSampleMask_ ) - , alphaToCoverageEnable( alphaToCoverageEnable_ ) - , alphaToOneEnable( alphaToOneEnable_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , rasterizationSamples{ rasterizationSamples_ } + , sampleShadingEnable{ sampleShadingEnable_ } + , minSampleShading{ minSampleShading_ } + , pSampleMask{ pSampleMask_ } + , alphaToCoverageEnable{ alphaToCoverageEnable_ } + , alphaToOneEnable{ alphaToOneEnable_ } { } @@ -40513,13 +40804,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t compareMask_ = {}, uint32_t writeMask_ = {}, uint32_t reference_ = {} ) VULKAN_HPP_NOEXCEPT - : failOp( failOp_ ) - , passOp( passOp_ ) - , depthFailOp( depthFailOp_ ) - , compareOp( compareOp_ ) - , compareMask( compareMask_ ) - , writeMask( writeMask_ ) - , reference( reference_ ) + : failOp{ failOp_ } + , passOp{ passOp_ } + , depthFailOp{ depthFailOp_ } + , compareOp{ compareOp_ } + , compareMask{ compareMask_ } + , writeMask{ writeMask_ } + , reference{ reference_ } { } @@ -40656,17 +40947,17 @@ namespace VULKAN_HPP_NAMESPACE float minDepthBounds_ = {}, float maxDepthBounds_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , depthTestEnable( depthTestEnable_ ) - , depthWriteEnable( depthWriteEnable_ ) - , depthCompareOp( depthCompareOp_ ) - , depthBoundsTestEnable( depthBoundsTestEnable_ ) - , stencilTestEnable( stencilTestEnable_ ) - , front( front_ ) - , back( back_ ) - , minDepthBounds( minDepthBounds_ ) - , maxDepthBounds( maxDepthBounds_ ) + : pNext{ pNext_ } + , flags{ flags_ 
} + , depthTestEnable{ depthTestEnable_ } + , depthWriteEnable{ depthWriteEnable_ } + , depthCompareOp{ depthCompareOp_ } + , depthBoundsTestEnable{ depthBoundsTestEnable_ } + , stencilTestEnable{ stencilTestEnable_ } + , front{ front_ } + , back{ back_ } + , minDepthBounds{ minDepthBounds_ } + , maxDepthBounds{ maxDepthBounds_ } { } @@ -40855,14 +41146,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::BlendFactor dstAlphaBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendOp alphaBlendOp_ = VULKAN_HPP_NAMESPACE::BlendOp::eAdd, VULKAN_HPP_NAMESPACE::ColorComponentFlags colorWriteMask_ = {} ) VULKAN_HPP_NOEXCEPT - : blendEnable( blendEnable_ ) - , srcColorBlendFactor( srcColorBlendFactor_ ) - , dstColorBlendFactor( dstColorBlendFactor_ ) - , colorBlendOp( colorBlendOp_ ) - , srcAlphaBlendFactor( srcAlphaBlendFactor_ ) - , dstAlphaBlendFactor( dstAlphaBlendFactor_ ) - , alphaBlendOp( alphaBlendOp_ ) - , colorWriteMask( colorWriteMask_ ) + : blendEnable{ blendEnable_ } + , srcColorBlendFactor{ srcColorBlendFactor_ } + , dstColorBlendFactor{ dstColorBlendFactor_ } + , colorBlendOp{ colorBlendOp_ } + , srcAlphaBlendFactor{ srcAlphaBlendFactor_ } + , dstAlphaBlendFactor{ dstAlphaBlendFactor_ } + , alphaBlendOp{ alphaBlendOp_ } + , colorWriteMask{ colorWriteMask_ } { } @@ -41013,13 +41304,13 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::PipelineColorBlendAttachmentState * pAttachments_ = {}, std::array<float, 4> const & blendConstants_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , logicOpEnable( logicOpEnable_ ) - , logicOp( logicOp_ ) - , attachmentCount( attachmentCount_ ) - , pAttachments( pAttachments_ ) - , blendConstants( blendConstants_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , logicOpEnable{ logicOpEnable_ } + , logicOp{ logicOp_ } + , attachmentCount{ attachmentCount_ } + , pAttachments{ pAttachments_ } + , blendConstants{ blendConstants_ } { } @@ -41191,10 +41482,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t dynamicStateCount_ = {}, const VULKAN_HPP_NAMESPACE::DynamicState * pDynamicStates_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , dynamicStateCount( dynamicStateCount_ ) - , pDynamicStates( pDynamicStates_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , dynamicStateCount{ dynamicStateCount_ } + , pDynamicStates{ pDynamicStates_ } { } @@ -41344,24 +41635,24 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stageCount( stageCount_ ) - , pStages( pStages_ ) - , pVertexInputState( pVertexInputState_ ) - , pInputAssemblyState( pInputAssemblyState_ ) - , pTessellationState( pTessellationState_ ) - , pViewportState( pViewportState_ ) - , pRasterizationState( pRasterizationState_ ) - , pMultisampleState( pMultisampleState_ ) - , pDepthStencilState( pDepthStencilState_ ) - , pColorBlendState( pColorBlendState_ ) - , pDynamicState( pDynamicState_ ) - , layout( layout_ ) - , renderPass( renderPass_ ) - , subpass( subpass_ ) - , basePipelineHandle( basePipelineHandle_ ) - , basePipelineIndex( basePipelineIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stageCount{ stageCount_ } + , pStages{ pStages_ } + , pVertexInputState{ pVertexInputState_ } + , pInputAssemblyState{ pInputAssemblyState_ } + , pTessellationState{ pTessellationState_ } + , pViewportState{
pViewportState_ } + , pRasterizationState{ pRasterizationState_ } + , pMultisampleState{ pMultisampleState_ } + , pDepthStencilState{ pDepthStencilState_ } + , pColorBlendState{ pColorBlendState_ } + , pDynamicState{ pDynamicState_ } + , layout{ layout_ } + , renderPass{ renderPass_ } + , subpass{ subpass_ } + , basePipelineHandle{ basePipelineHandle_ } + , basePipelineIndex{ basePipelineIndex_ } { } @@ -41670,8 +41961,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR GraphicsPipelineLibraryCreateInfoEXT( VULKAN_HPP_NAMESPACE::GraphicsPipelineLibraryFlagsEXT flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -41770,11 +42061,11 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo * pVertexInputState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo * pTessellationState_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stageCount( stageCount_ ) - , pStages( pStages_ ) - , pVertexInputState( pVertexInputState_ ) - , pTessellationState( pTessellationState_ ) + : pNext{ pNext_ } + , stageCount{ stageCount_ } + , pStages{ pStages_ } + , pVertexInputState{ pVertexInputState_ } + , pTessellationState{ pTessellationState_ } { } @@ -41927,11 +42218,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t pipelineCount_ = {}, const VULKAN_HPP_NAMESPACE::Pipeline * pPipelines_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , groupCount( groupCount_ ) - , pGroups( pGroups_ ) - , pipelineCount( pipelineCount_ ) - , pPipelines( pPipelines_ ) + : pNext{ pNext_ } + , groupCount{ groupCount_ } + , pGroups{ pGroups_ } + , pipelineCount{ pipelineCount_ } + , pPipelines{ pPipelines_ } { } @@ -42085,8 +42376,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR XYColorEXT( float x_ = {}, float y_ = {} ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) + : x{ x_ } + , y{ y_ } { } @@ -42179,15 +42470,15 @@ namespace VULKAN_HPP_NAMESPACE float maxContentLightLevel_ = {}, float maxFrameAverageLightLevel_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displayPrimaryRed( displayPrimaryRed_ ) - , displayPrimaryGreen( displayPrimaryGreen_ ) - , displayPrimaryBlue( displayPrimaryBlue_ ) - , whitePoint( whitePoint_ ) - , maxLuminance( maxLuminance_ ) - , minLuminance( minLuminance_ ) - , maxContentLightLevel( maxContentLightLevel_ ) - , maxFrameAverageLightLevel( maxFrameAverageLightLevel_ ) + : pNext{ pNext_ } + , displayPrimaryRed{ displayPrimaryRed_ } + , displayPrimaryGreen{ displayPrimaryGreen_ } + , displayPrimaryBlue{ displayPrimaryBlue_ } + , whitePoint{ whitePoint_ } + , maxLuminance{ maxLuminance_ } + , minLuminance{ minLuminance_ } + , maxContentLightLevel{ maxContentLightLevel_ } + , maxFrameAverageLightLevel{ maxFrameAverageLightLevel_ } { } @@ -42350,8 +42641,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR HeadlessSurfaceCreateInfoEXT( VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateFlagsEXT flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -42448,9 +42739,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR HostImageCopyDevicePerformanceQueryEXT( VULKAN_HPP_NAMESPACE::Bool32 
optimalDeviceAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , optimalDeviceAccess( optimalDeviceAccess_ ) - , identicalMemoryLayout( identicalMemoryLayout_ ) + : pNext{ pNext_ } + , optimalDeviceAccess{ optimalDeviceAccess_ } + , identicalMemoryLayout{ identicalMemoryLayout_ } { } @@ -42537,11 +42828,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , oldLayout( oldLayout_ ) - , newLayout( newLayout_ ) - , subresourceRange( subresourceRange_ ) + : pNext{ pNext_ } + , image{ image_ } + , oldLayout{ oldLayout_ } + , newLayout{ newLayout_ } + , subresourceRange{ subresourceRange_ } { } @@ -42667,9 +42958,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR IOSSurfaceCreateInfoMVK( VULKAN_HPP_NAMESPACE::IOSSurfaceCreateFlagsMVK flags_ = {}, const void * pView_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pView( pView_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pView{ pView_ } { } @@ -42763,6 +43054,102 @@ namespace VULKAN_HPP_NAMESPACE }; #endif /*VK_USE_PLATFORM_IOS_MVK*/ + struct ImageAlignmentControlCreateInfoMESA + { + using NativeType = VkImageAlignmentControlCreateInfoMESA; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageAlignmentControlCreateInfoMESA; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageAlignmentControlCreateInfoMESA( uint32_t maximumRequestedAlignment_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , maximumRequestedAlignment{ maximumRequestedAlignment_ } + { + } + + VULKAN_HPP_CONSTEXPR ImageAlignmentControlCreateInfoMESA( ImageAlignmentControlCreateInfoMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageAlignmentControlCreateInfoMESA( VkImageAlignmentControlCreateInfoMESA const & rhs ) VULKAN_HPP_NOEXCEPT + : ImageAlignmentControlCreateInfoMESA( *reinterpret_cast<ImageAlignmentControlCreateInfoMESA const *>( &rhs ) ) + { + } + + ImageAlignmentControlCreateInfoMESA & operator=( ImageAlignmentControlCreateInfoMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + ImageAlignmentControlCreateInfoMESA & operator=( VkImageAlignmentControlCreateInfoMESA const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ImageAlignmentControlCreateInfoMESA const *>( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 ImageAlignmentControlCreateInfoMESA & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 ImageAlignmentControlCreateInfoMESA & setMaximumRequestedAlignment( uint32_t maximumRequestedAlignment_ ) VULKAN_HPP_NOEXCEPT + { + maximumRequestedAlignment = maximumRequestedAlignment_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkImageAlignmentControlCreateInfoMESA const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkImageAlignmentControlCreateInfoMESA *>( this ); + } + + operator VkImageAlignmentControlCreateInfoMESA &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkImageAlignmentControlCreateInfoMESA *>( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, const void * const &, uint32_t const &> +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return
std::tie( sType, pNext, maximumRequestedAlignment ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( ImageAlignmentControlCreateInfoMESA const & ) const = default; +#else + bool operator==( ImageAlignmentControlCreateInfoMESA const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( maximumRequestedAlignment == rhs.maximumRequestedAlignment ); +# endif + } + + bool operator!=( ImageAlignmentControlCreateInfoMESA const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageAlignmentControlCreateInfoMESA; + const void * pNext = {}; + uint32_t maximumRequestedAlignment = {}; + }; + + template <> + struct CppType<StructureType, StructureType::eImageAlignmentControlCreateInfoMESA> + { + using Type = ImageAlignmentControlCreateInfoMESA; + }; + struct ImageBlit { using NativeType = VkImageBlit; @@ -42772,10 +43159,10 @@ namespace VULKAN_HPP_NAMESPACE std::array<VULKAN_HPP_NAMESPACE::Offset3D, 2> const & srcOffsets_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, std::array<VULKAN_HPP_NAMESPACE::Offset3D, 2> const & dstOffsets_ = {} ) VULKAN_HPP_NOEXCEPT - : srcSubresource( srcSubresource_ ) - , srcOffsets( srcOffsets_ ) - , dstSubresource( dstSubresource_ ) - , dstOffsets( dstOffsets_ ) + : srcSubresource{ srcSubresource_ } + , srcOffsets{ srcOffsets_ } + , dstSubresource{ dstSubresource_ } + , dstOffsets{ dstOffsets_ } { } @@ -42878,8 +43265,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageCaptureDescriptorDataInfoEXT( VULKAN_HPP_NAMESPACE::Image image_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) + : pNext{ pNext_ } + , image{ image_ } { } @@ -42977,10 +43364,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t compressionControlPlaneCount_ = {}, VULKAN_HPP_NAMESPACE::ImageCompressionFixedRateFlagsEXT * pFixedRateFlags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , compressionControlPlaneCount( compressionControlPlaneCount_ ) - , pFixedRateFlags( pFixedRateFlags_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , compressionControlPlaneCount{ compressionControlPlaneCount_ } + , pFixedRateFlags{ pFixedRateFlags_ } { } @@ -43119,9 +43506,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImageCompressionPropertiesEXT( VULKAN_HPP_NAMESPACE::ImageCompressionFlagsEXT imageCompressionFlags_ = {}, VULKAN_HPP_NAMESPACE::ImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageCompressionFlags( imageCompressionFlags_ ) - , imageCompressionFixedRateFlags( imageCompressionFixedRateFlags_ ) + : pNext{ pNext_ } + , imageCompressionFlags{ imageCompressionFlags_ } + , imageCompressionFixedRateFlags{ imageCompressionFixedRateFlags_ } { } @@ -43214,13 +43601,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t colorSpaceCount_ = {}, const VULKAN_HPP_NAMESPACE::SysmemColorSpaceFUCHSIA * pColorSpaces_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageCreateInfo( imageCreateInfo_ ) - , requiredFormatFeatures( requiredFormatFeatures_ ) - , flags( flags_ ) - , sysmemPixelFormat( sysmemPixelFormat_ ) - , colorSpaceCount( colorSpaceCount_ ) - , pColorSpaces( pColorSpaces_ ) + : pNext{ pNext_ } + , imageCreateInfo{ imageCreateInfo_ } + , requiredFormatFeatures{
requiredFormatFeatures_ } + , flags{ flags_ } + , sysmemPixelFormat{ sysmemPixelFormat_ } + , colorSpaceCount{ colorSpaceCount_ } + , pColorSpaces{ pColorSpaces_ } { } @@ -43396,11 +43783,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::BufferCollectionConstraintsInfoFUCHSIA bufferCollectionConstraints_ = {}, VULKAN_HPP_NAMESPACE::ImageConstraintsInfoFlagsFUCHSIA flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , formatConstraintsCount( formatConstraintsCount_ ) - , pFormatConstraints( pFormatConstraints_ ) - , bufferCollectionConstraints( bufferCollectionConstraints_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , formatConstraintsCount{ formatConstraintsCount_ } + , pFormatConstraints{ pFormatConstraints_ } + , bufferCollectionConstraints{ bufferCollectionConstraints_ } + , flags{ flags_ } { } @@ -43552,11 +43939,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {} ) VULKAN_HPP_NOEXCEPT - : srcSubresource( srcSubresource_ ) - , srcOffset( srcOffset_ ) - , dstSubresource( dstSubresource_ ) - , dstOffset( dstOffset_ ) - , extent( extent_ ) + : srcSubresource{ srcSubresource_ } + , srcOffset{ srcOffset_ } + , dstSubresource{ dstSubresource_ } + , dstOffset{ dstOffset_ } + , extent{ extent_ } { } @@ -43668,11 +44055,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize rowPitch_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize arrayPitch_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize depthPitch_ = {} ) VULKAN_HPP_NOEXCEPT - : offset( offset_ ) - , size( size_ ) - , rowPitch( rowPitch_ ) - , arrayPitch( arrayPitch_ ) - , depthPitch( depthPitch_ ) + : offset{ offset_ } + , size{ size_ } + , rowPitch{ rowPitch_ } + , arrayPitch{ arrayPitch_ } + , depthPitch{ depthPitch_ } { } @@ -43786,10 +44173,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t drmFormatModifierPlaneCount_ = {}, const VULKAN_HPP_NAMESPACE::SubresourceLayout * pPlaneLayouts_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifier( drmFormatModifier_ ) - , drmFormatModifierPlaneCount( drmFormatModifierPlaneCount_ ) - , pPlaneLayouts( pPlaneLayouts_ ) + : pNext{ pNext_ } + , drmFormatModifier{ drmFormatModifier_ } + , drmFormatModifierPlaneCount{ drmFormatModifierPlaneCount_ } + , pPlaneLayouts{ pPlaneLayouts_ } { } @@ -43930,9 +44317,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierListCreateInfoEXT( uint32_t drmFormatModifierCount_ = {}, const uint64_t * pDrmFormatModifiers_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifierCount( drmFormatModifierCount_ ) - , pDrmFormatModifiers( pDrmFormatModifiers_ ) + : pNext{ pNext_ } + , drmFormatModifierCount{ drmFormatModifierCount_ } + , pDrmFormatModifiers{ pDrmFormatModifiers_ } { } @@ -44053,8 +44440,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierPropertiesEXT( uint64_t drmFormatModifier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifier( drmFormatModifier_ ) + : pNext{ pNext_ } + , drmFormatModifier{ drmFormatModifier_ } { } @@ -44137,9 +44524,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImageFormatListCreateInfo( uint32_t viewFormatCount_ = {}, const VULKAN_HPP_NAMESPACE::Format * pViewFormats_ = {}, const void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , viewFormatCount( viewFormatCount_ ) - , pViewFormats( pViewFormats_ ) + : pNext{ pNext_ } + , viewFormatCount{ viewFormatCount_ } + , pViewFormats{ pViewFormats_ } { } @@ -44262,8 +44649,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageFormatProperties2( VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageFormatProperties( imageFormatProperties_ ) + : pNext{ pNext_ } + , imageFormatProperties{ imageFormatProperties_ } { } @@ -44354,15 +44741,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcAccessMask( srcAccessMask_ ) - , dstAccessMask( dstAccessMask_ ) - , oldLayout( oldLayout_ ) - , newLayout( newLayout_ ) - , srcQueueFamilyIndex( srcQueueFamilyIndex_ ) - , dstQueueFamilyIndex( dstQueueFamilyIndex_ ) - , image( image_ ) - , subresourceRange( subresourceRange_ ) + : pNext{ pNext_ } + , srcAccessMask{ srcAccessMask_ } + , dstAccessMask{ dstAccessMask_ } + , oldLayout{ oldLayout_ } + , newLayout{ newLayout_ } + , srcQueueFamilyIndex{ srcQueueFamilyIndex_ } + , dstQueueFamilyIndex{ dstQueueFamilyIndex_ } + , image{ image_ } + , subresourceRange{ subresourceRange_ } { } @@ -44515,8 +44902,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageMemoryRequirementsInfo2( VULKAN_HPP_NAMESPACE::Image image_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) + : pNext{ pNext_ } + , image{ image_ } { } @@ -44616,9 +45003,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImagePipeSurfaceCreateInfoFUCHSIA( VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateFlagsFUCHSIA flags_ = {}, zx_handle_t imagePipeHandle_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , imagePipeHandle( imagePipeHandle_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , imagePipeHandle{ imagePipeHandle_ } { } @@ -44735,8 +45122,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImagePlaneMemoryRequirementsInfo( VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , planeAspect( planeAspect_ ) + : pNext{ pNext_ } + , planeAspect{ planeAspect_ } { } @@ -44834,11 +45221,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {} ) VULKAN_HPP_NOEXCEPT - : srcSubresource( srcSubresource_ ) - , srcOffset( srcOffset_ ) - , dstSubresource( dstSubresource_ ) - , dstOffset( dstOffset_ ) - , extent( extent_ ) + : srcSubresource{ srcSubresource_ } + , srcOffset{ srcOffset_ } + , dstSubresource{ dstSubresource_ } + , dstOffset{ dstOffset_ } + , extent{ extent_ } { } @@ -44954,12 +45341,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcSubresource( srcSubresource_ ) - , srcOffset( srcOffset_ ) - , dstSubresource( dstSubresource_ ) - , dstOffset( dstOffset_ ) - , extent( extent_ ) + : pNext{ pNext_ } + 
, srcSubresource{ srcSubresource_ } + , srcOffset{ srcOffset_ } + , dstSubresource{ dstSubresource_ } + , dstOffset{ dstOffset_ } + , extent{ extent_ } { } @@ -45088,8 +45475,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageSparseMemoryRequirementsInfo2( VULKAN_HPP_NAMESPACE::Image image_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) + : pNext{ pNext_ } + , image{ image_ } { } @@ -45187,8 +45574,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageStencilUsageCreateInfo( VULKAN_HPP_NAMESPACE::ImageUsageFlags stencilUsage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stencilUsage( stencilUsage_ ) + : pNext{ pNext_ } + , stencilUsage{ stencilUsage_ } { } @@ -45285,8 +45672,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageSwapchainCreateInfoKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchain( swapchain_ ) + : pNext{ pNext_ } + , swapchain{ swapchain_ } { } @@ -45382,8 +45769,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageViewASTCDecodeModeEXT( VULKAN_HPP_NAMESPACE::Format decodeMode_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , decodeMode( decodeMode_ ) + : pNext{ pNext_ } + , decodeMode{ decodeMode_ } { } @@ -45480,9 +45867,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImageViewAddressPropertiesNVX( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceAddress( deviceAddress_ ) - , size( size_ ) + : pNext{ pNext_ } + , deviceAddress{ deviceAddress_ } + , size{ size_ } { } @@ -45566,8 +45953,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageViewCaptureDescriptorDataInfoEXT( VULKAN_HPP_NAMESPACE::ImageView imageView_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageView( imageView_ ) + : pNext{ pNext_ } + , imageView{ imageView_ } { } @@ -45668,13 +46055,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ComponentMapping components_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , image( image_ ) - , viewType( viewType_ ) - , format( format_ ) - , components( components_ ) - , subresourceRange( subresourceRange_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , image{ image_ } + , viewType{ viewType_ } + , format{ format_ } + , components{ components_ } + , subresourceRange{ subresourceRange_ } { } @@ -45815,10 +46202,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, VULKAN_HPP_NAMESPACE::Sampler sampler_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageView( imageView_ ) - , descriptorType( descriptorType_ ) - , sampler( sampler_ ) + : pNext{ pNext_ } + , imageView{ imageView_ } + , descriptorType{ descriptorType_ } + , sampler{ sampler_ } { } @@ -45932,8 +46319,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( 
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageViewMinLodCreateInfoEXT( float minLod_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minLod( minLod_ ) + : pNext{ pNext_ } + , minLod{ minLod_ } { } @@ -46031,10 +46418,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D filterSize_ = {}, uint32_t numPhases_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , filterCenter( filterCenter_ ) - , filterSize( filterSize_ ) - , numPhases( numPhases_ ) + : pNext{ pNext_ } + , filterCenter{ filterCenter_ } + , filterSize{ filterSize_ } + , numPhases{ numPhases_ } { } @@ -46149,9 +46536,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageViewSlicedCreateInfoEXT( uint32_t sliceOffset_ = {}, uint32_t sliceCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sliceOffset( sliceOffset_ ) - , sliceCount( sliceCount_ ) + : pNext{ pNext_ } + , sliceOffset{ sliceOffset_ } + , sliceCount{ sliceCount_ } { } @@ -46253,8 +46640,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImageViewUsageCreateInfo( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , usage( usage_ ) + : pNext{ pNext_ } + , usage{ usage_ } { } @@ -46352,8 +46739,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImportAndroidHardwareBufferInfoANDROID( struct AHardwareBuffer * buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -46454,11 +46841,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, int fd_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fence( fence_ ) - , flags( flags_ ) - , handleType( handleType_ ) - , fd( fd_ ) + : pNext{ pNext_ } + , fence{ fence_ } + , flags{ flags_ } + , handleType{ handleType_ } + , fd{ fd_ } { } @@ -46587,12 +46974,12 @@ namespace VULKAN_HPP_NAMESPACE HANDLE handle_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fence( fence_ ) - , flags( flags_ ) - , handleType( handleType_ ) - , handle( handle_ ) - , name( name_ ) + : pNext{ pNext_ } + , fence{ fence_ } + , flags{ flags_ } + , handleType{ handleType_ } + , handle{ handle_ } + , name{ name_ } { } @@ -46727,9 +47114,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImportMemoryBufferCollectionFUCHSIA( VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA collection_ = {}, uint32_t index_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , collection( collection_ ) - , index( index_ ) + : pNext{ pNext_ } + , collection{ collection_ } + , index{ index_ } { } @@ -46835,9 +47222,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, int fd_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) - , fd( fd_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } + , fd{ fd_ } { } @@ -46942,9 +47329,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits 
handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, void * pHostPointer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) - , pHostPointer( pHostPointer_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } + , pHostPointer{ pHostPointer_ } { } @@ -47053,10 +47440,10 @@ namespace VULKAN_HPP_NAMESPACE HANDLE handle_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) - , handle( handle_ ) - , name( name_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } + , handle{ handle_ } + , name{ name_ } { } @@ -47174,9 +47561,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImportMemoryWin32HandleInfoNV( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType_ = {}, HANDLE handle_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) - , handle( handle_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } + , handle{ handle_ } { } @@ -47284,9 +47671,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, zx_handle_t handle_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) - , handle( handle_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } + , handle{ handle_ } { } @@ -47403,8 +47790,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImportMetalBufferInfoEXT( MTLBuffer_id mtlBuffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mtlBuffer( mtlBuffer_ ) + : pNext{ pNext_ } + , mtlBuffer{ mtlBuffer_ } { } @@ -47501,8 +47888,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImportMetalIOSurfaceInfoEXT( IOSurfaceRef ioSurface_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , ioSurface( ioSurface_ ) + : pNext{ pNext_ } + , ioSurface{ ioSurface_ } { } @@ -47599,8 +47986,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImportMetalSharedEventInfoEXT( MTLSharedEvent_id mtlSharedEvent_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mtlSharedEvent( mtlSharedEvent_ ) + : pNext{ pNext_ } + , mtlSharedEvent{ mtlSharedEvent_ } { } @@ -47699,9 +48086,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ImportMetalTextureInfoEXT( VULKAN_HPP_NAMESPACE::ImageAspectFlagBits plane_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor, MTLTexture_id mtlTexture_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , plane( plane_ ) - , mtlTexture( mtlTexture_ ) + : pNext{ pNext_ } + , plane{ plane_ } + , mtlTexture{ mtlTexture_ } { } @@ -47805,8 +48192,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ImportScreenBufferInfoQNX( struct _screen_buffer * buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , buffer{ buffer_ } { } @@ -47907,11 +48294,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, int fd_ = {}, const void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , flags( flags_ ) - , handleType( handleType_ ) - , fd( fd_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , flags{ flags_ } + , handleType{ handleType_ } + , fd{ fd_ } { } @@ -48041,12 +48428,12 @@ namespace VULKAN_HPP_NAMESPACE HANDLE handle_ = {}, LPCWSTR name_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , flags( flags_ ) - , handleType( handleType_ ) - , handle( handle_ ) - , name( name_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , flags{ flags_ } + , handleType{ handleType_ } + , handle{ handle_ } + , name{ name_ } { } @@ -48184,11 +48571,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, zx_handle_t zirconHandle_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , flags( flags_ ) - , handleType( handleType_ ) - , zirconHandle( zirconHandle_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , flags{ flags_ } + , handleType{ handleType_ } + , zirconHandle{ zirconHandle_ } { } @@ -48338,20 +48725,20 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::IndexType * pIndexTypes_ = {}, const uint32_t * pIndexTypeValues_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , tokenType( tokenType_ ) - , stream( stream_ ) - , offset( offset_ ) - , vertexBindingUnit( vertexBindingUnit_ ) - , vertexDynamicStride( vertexDynamicStride_ ) - , pushconstantPipelineLayout( pushconstantPipelineLayout_ ) - , pushconstantShaderStageFlags( pushconstantShaderStageFlags_ ) - , pushconstantOffset( pushconstantOffset_ ) - , pushconstantSize( pushconstantSize_ ) - , indirectStateFlags( indirectStateFlags_ ) - , indexTypeCount( indexTypeCount_ ) - , pIndexTypes( pIndexTypes_ ) - , pIndexTypeValues( pIndexTypeValues_ ) + : pNext{ pNext_ } + , tokenType{ tokenType_ } + , stream{ stream_ } + , offset{ offset_ } + , vertexBindingUnit{ vertexBindingUnit_ } + , vertexDynamicStride{ vertexDynamicStride_ } + , pushconstantPipelineLayout{ pushconstantPipelineLayout_ } + , pushconstantShaderStageFlags{ pushconstantShaderStageFlags_ } + , pushconstantOffset{ pushconstantOffset_ } + , pushconstantSize{ pushconstantSize_ } + , indirectStateFlags{ indirectStateFlags_ } + , indexTypeCount{ indexTypeCount_ } + , pIndexTypes{ pIndexTypes_ } + , pIndexTypeValues{ pIndexTypeValues_ } { } @@ -48634,13 +49021,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t streamCount_ = {}, const uint32_t * pStreamStrides_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , tokenCount( tokenCount_ ) - , pTokens( pTokens_ ) - , streamCount( streamCount_ ) - , pStreamStrides( pStreamStrides_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , tokenCount{ tokenCount_ } + , pTokens{ pTokens_ } + , streamCount{ streamCount_ } + , pStreamStrides{ pStreamStrides_ } { } @@ -48818,8 +49205,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR InitializePerformanceApiInfoINTEL( void * pUserData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pUserData( pUserData_ ) + : pNext{ pNext_ } + , pUserData{ pUserData_ } { } @@ -48913,9 +49300,9 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_CONSTEXPR InputAttachmentAspectReference( uint32_t subpass_ = {}, uint32_t inputAttachmentIndex_ = {}, VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {} ) VULKAN_HPP_NOEXCEPT - : subpass( subpass_ ) - , inputAttachmentIndex( inputAttachmentIndex_ ) - , aspectMask( aspectMask_ ) + : subpass{ subpass_ } + , inputAttachmentIndex{ inputAttachmentIndex_ } + , aspectMask{ aspectMask_ } { } @@ -49018,13 +49405,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t enabledExtensionCount_ = {}, const char * const * ppEnabledExtensionNames_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pApplicationInfo( pApplicationInfo_ ) - , enabledLayerCount( enabledLayerCount_ ) - , ppEnabledLayerNames( ppEnabledLayerNames_ ) - , enabledExtensionCount( enabledExtensionCount_ ) - , ppEnabledExtensionNames( ppEnabledExtensionNames_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pApplicationInfo{ pApplicationInfo_ } + , enabledLayerCount{ enabledLayerCount_ } + , ppEnabledLayerNames{ ppEnabledLayerNames_ } + , enabledExtensionCount{ enabledExtensionCount_ } + , ppEnabledExtensionNames{ ppEnabledExtensionNames_ } { } @@ -49230,9 +49617,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR LatencySleepInfoNV( VULKAN_HPP_NAMESPACE::Semaphore signalSemaphore_ = {}, uint64_t value_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , signalSemaphore( signalSemaphore_ ) - , value( value_ ) + : pNext{ pNext_ } + , signalSemaphore{ signalSemaphore_ } + , value{ value_ } { } @@ -49334,10 +49721,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 lowLatencyBoost_ = {}, uint32_t minimumIntervalUs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , lowLatencyMode( lowLatencyMode_ ) - , lowLatencyBoost( lowLatencyBoost_ ) - , minimumIntervalUs( minimumIntervalUs_ ) + : pNext{ pNext_ } + , lowLatencyMode{ lowLatencyMode_ } + , lowLatencyBoost{ lowLatencyBoost_ } + , minimumIntervalUs{ minimumIntervalUs_ } { } @@ -49451,8 +49838,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR LatencySubmissionPresentIdNV( uint64_t presentID_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentID( presentID_ ) + : pNext{ pNext_ } + , presentID{ presentID_ } { } @@ -49549,9 +49936,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR LatencySurfaceCapabilitiesNV( uint32_t presentModeCount_ = {}, VULKAN_HPP_NAMESPACE::PresentModeKHR * pPresentModes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentModeCount( presentModeCount_ ) - , pPresentModes( pPresentModes_ ) + : pNext{ pNext_ } + , presentModeCount{ presentModeCount_ } + , pPresentModes{ pPresentModes_ } { } @@ -49671,10 +50058,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t specVersion_ = {}, uint32_t implementationVersion_ = {}, std::array<char, VK_MAX_DESCRIPTION_SIZE> const & description_ = {} ) VULKAN_HPP_NOEXCEPT - : layerName( layerName_ ) - , specVersion( specVersion_ ) - , implementationVersion( implementationVersion_ ) - , description( description_ ) + : layerName{ layerName_ } + , specVersion{ specVersion_ } + , implementationVersion{ implementationVersion_ } + , description{ description_ } { } @@ -49682,26 +50069,6 @@ namespace VULKAN_HPP_NAMESPACE LayerProperties( VkLayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT : LayerProperties( *reinterpret_cast<LayerProperties const *>( &rhs ) ) {} -# if !defined(
VULKAN_HPP_DISABLE_ENHANCED_MODE ) - LayerProperties( std::string const & layerName_, uint32_t specVersion_ = {}, uint32_t implementationVersion_ = {}, std::string const & description_ = {} ) - : specVersion( specVersion_ ), implementationVersion( implementationVersion_ ) - { - VULKAN_HPP_ASSERT( layerName_.size() < VK_MAX_EXTENSION_NAME_SIZE ); -# if defined( WIN32 ) - strncpy_s( layerName, VK_MAX_EXTENSION_NAME_SIZE, layerName_.data(), layerName_.size() ); -# else - strncpy( layerName, layerName_.data(), std::min<size_t>( VK_MAX_EXTENSION_NAME_SIZE, layerName_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min<size_t>( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - LayerProperties & operator=( LayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -49780,11 +50147,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_ = VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT::eBool32, uint32_t valueCount_ = {}, const void * pValues_ = {} ) VULKAN_HPP_NOEXCEPT - : pLayerName( pLayerName_ ) - , pSettingName( pSettingName_ ) - , type( type_ ) - , valueCount( valueCount_ ) - , pValues( pValues_ ) + : pLayerName{ pLayerName_ } + , pSettingName{ pSettingName_ } + , type{ type_ } + , valueCount{ valueCount_ } + , pValues{ pValues_ } { } @@ -49793,17 +50160,96 @@ namespace VULKAN_HPP_NAMESPACE LayerSettingEXT( VkLayerSettingEXT const & rhs ) VULKAN_HPP_NOEXCEPT : LayerSettingEXT( *reinterpret_cast<LayerSettingEXT const *>( &rhs ) ) {} # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - template <typename T> - LayerSettingEXT( const char * pLayerName_, - const char * pSettingName_, - VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<T> const & values_ ) + // NOTE: you need to provide the type because vk::Bool32 and uint32_t are indistinguishable!
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
       : pLayerName( pLayerName_ )
       , pSettingName( pSettingName_ )
       , type( type_ )
-      , valueCount( static_cast<uint32_t>( values_.size() * sizeof( T ) ) )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
       , pValues( values_.data() )
     {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
+    }
+
+    LayerSettingEXT( char const *                              pLayerName_,
+                     char const *                              pSettingName_,
+                     VULKAN_HPP_NAMESPACE::LayerSettingTypeEXT type_,
+                     vk::ArrayProxyNoTemporaries const &       values_ )
+      : pLayerName( pLayerName_ )
+      , pSettingName( pSettingName_ )
+      , type( type_ )
+      , valueCount( static_cast<uint32_t>( values_.size() ) )
+      , pValues( values_.data() )
+    {
+      VULKAN_HPP_ASSERT( VULKAN_HPP_NAMESPACE::isSameType( type ) );
     }
 #  endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
@@ -49841,17 +50287,52 @@ namespace VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-    VULKAN_HPP_CONSTEXPR_14 LayerSettingEXT & setPValues( const void * pValues_ ) VULKAN_HPP_NOEXCEPT
+#  if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
     {
-      pValues = pValues_;
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
       return *this;
     }
 
-#  if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
-    template <typename T>
-    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<T> const & values_ ) VULKAN_HPP_NOEXCEPT
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
     {
-      valueCount = static_cast<uint32_t>( values_.size() * sizeof( T ) );
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
+      return *this;
+    }
+
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
+    {
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
+      return *this;
+    }
+
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
+    {
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
+      return *this;
+    }
+
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
+    {
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
+      return *this;
+    }
+
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
+    {
+      valueCount = static_cast<uint32_t>( values_.size() );
+      pValues    = values_.data();
+      return *this;
+    }
+
+    LayerSettingEXT & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT
+    {
+      valueCount = static_cast<uint32_t>( values_.size() );
       pValues    = values_.data();
       return *this;
     }
@@ -49931,9 +50412,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR LayerSettingsCreateInfoEXT( uint32_t                                       settingCount_ = {},
                                                      const VULKAN_HPP_NAMESPACE::LayerSettingEXT * pSettings_    = {},
                                                      const void *                                   pNext_       = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , settingCount( settingCount_ )
-      , pSettings( pSettings_ )
+      : pNext{ pNext_ }
+      , settingCount{ settingCount_ }
+      , pSettings{ pSettings_ }
     {
     }
 
@@ -50056,9 +50537,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR MacOSSurfaceCreateInfoMVK( VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateFlagsMVK flags_ = {},
                                                     const void *                                     pView_ = {},
                                                     const void *                                     pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , flags( flags_ )
-      , pView( pView_ )
+      : pNext{ pNext_ }
+      , flags{ flags_ }
+      , pView{ pView_ }
     {
     }
 
@@ -50165,10 +50646,10 @@ namespace VULKAN_HPP_NAMESPACE
                                           VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {},
                                           VULKAN_HPP_NAMESPACE::DeviceSize size_   = {},
                                           const void *                     pNext_  = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , memory( memory_ )
-      , offset( offset_ )
-      , size( size_ )
+      : pNext{ pNext_ }
+      , memory{ memory_ }
+      , offset{ offset_ }
+      , size{ size_ }
     {
     }
 
@@ -50280,9 +50761,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR MemoryAllocateFlagsInfo( VULKAN_HPP_NAMESPACE::MemoryAllocateFlags flags_      = {},
                                                   uint32_t                                  deviceMask_ = {},
                                                   const void *                              pNext_      = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , flags( flags_ )
-      , deviceMask( deviceMask_ )
+      : pNext{ pNext_ }
+      , flags{ flags_ }
+      , deviceMask{ deviceMask_ }
     {
     }
 
@@ -50388,9 +50869,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR MemoryAllocateInfo( VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_  = {},
                                              uint32_t                         memoryTypeIndex_ = {},
                                              const void *                     pNext_           = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , allocationSize( allocationSize_ )
-      , memoryTypeIndex( memoryTypeIndex_ )
+      : pNext{ pNext_ }
+      , allocationSize{ allocationSize_ }
+      , memoryTypeIndex{ memoryTypeIndex_ }
     {
     }
 
@@ -50491,9 +50972,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR MemoryBarrier( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {},
                                         VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {},
                                         const void *
pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcAccessMask( srcAccessMask_ ) - , dstAccessMask( dstAccessMask_ ) + : pNext{ pNext_ } + , srcAccessMask{ srcAccessMask_ } + , dstAccessMask{ dstAccessMask_ } { } @@ -50597,9 +51078,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MemoryDedicatedAllocateInfo( VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image( image_ ) - , buffer( buffer_ ) + : pNext{ pNext_ } + , image{ image_ } + , buffer{ buffer_ } { } @@ -50705,9 +51186,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MemoryDedicatedRequirements( VULKAN_HPP_NAMESPACE::Bool32 prefersDedicatedAllocation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 requiresDedicatedAllocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , prefersDedicatedAllocation( prefersDedicatedAllocation_ ) - , requiresDedicatedAllocation( requiresDedicatedAllocation_ ) + : pNext{ pNext_ } + , prefersDedicatedAllocation{ prefersDedicatedAllocation_ } + , requiresDedicatedAllocation{ requiresDedicatedAllocation_ } { } @@ -50792,8 +51273,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryFdPropertiesKHR( uint32_t memoryTypeBits_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryTypeBits( memoryTypeBits_ ) + : pNext{ pNext_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -50876,8 +51357,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryGetAndroidHardwareBufferInfoANDROID( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) + : pNext{ pNext_ } + , memory{ memory_ } { } @@ -50976,9 +51457,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , memory{ memory_ } + , handleType{ handleType_ } { } @@ -51083,9 +51564,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , memory{ memory_ } + , handleType{ handleType_ } { } @@ -51195,9 +51676,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , memory{ memory_ } + , handleType{ handleType_ } { } @@ -51308,9 +51789,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memory( memory_ ) - , handleType( 
handleType_ ) + : pNext{ pNext_ } + , memory{ memory_ } + , handleType{ handleType_ } { } @@ -51414,8 +51895,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryHeap( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::MemoryHeapFlags flags_ = {} ) VULKAN_HPP_NOEXCEPT - : size( size_ ) - , flags( flags_ ) + : size{ size_ } + , flags{ flags_ } { } @@ -51486,8 +51967,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryHostPointerPropertiesEXT( uint32_t memoryTypeBits_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryTypeBits( memoryTypeBits_ ) + : pNext{ pNext_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -51572,11 +52053,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , memory( memory_ ) - , offset( offset_ ) - , size( size_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , memory{ memory_ } + , offset{ offset_ } + , size{ size_ } { } @@ -51695,8 +52176,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryMapPlacedInfoEXT( void * pPlacedAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pPlacedAddress( pPlacedAddress_ ) + : pNext{ pNext_ } + , pPlacedAddress{ pPlacedAddress_ } { } @@ -51791,8 +52272,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryOpaqueCaptureAddressAllocateInfo( uint64_t opaqueCaptureAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , opaqueCaptureAddress( opaqueCaptureAddress_ ) + : pNext{ pNext_ } + , opaqueCaptureAddress{ opaqueCaptureAddress_ } { } @@ -51889,8 +52370,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryPriorityAllocateInfoEXT( float priority_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , priority( priority_ ) + : pNext{ pNext_ } + , priority{ priority_ } { } @@ -51984,9 +52465,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MemoryRequirements( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize alignment_ = {}, uint32_t memoryTypeBits_ = {} ) VULKAN_HPP_NOEXCEPT - : size( size_ ) - , alignment( alignment_ ) - , memoryTypeBits( memoryTypeBits_ ) + : size{ size_ } + , alignment{ alignment_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -52058,8 +52539,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryRequirements2( VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryRequirements( memoryRequirements_ ) + : pNext{ pNext_ } + , memoryRequirements{ memoryRequirements_ } { } @@ -52138,8 +52619,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryType( VULKAN_HPP_NAMESPACE::MemoryPropertyFlags propertyFlags_ = {}, uint32_t heapIndex_ = {} ) VULKAN_HPP_NOEXCEPT - : propertyFlags( propertyFlags_ ) - , heapIndex( heapIndex_ ) + : propertyFlags{ propertyFlags_ } + , heapIndex{ heapIndex_ } { } @@ -52212,9 +52693,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MemoryUnmapInfoKHR( 
VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , memory( memory_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , memory{ memory_ } { } @@ -52317,8 +52798,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryWin32HandlePropertiesKHR( uint32_t memoryTypeBits_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryTypeBits( memoryTypeBits_ ) + : pNext{ pNext_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -52401,8 +52882,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MemoryZirconHandlePropertiesFUCHSIA( uint32_t memoryTypeBits_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryTypeBits( memoryTypeBits_ ) + : pNext{ pNext_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -52487,9 +52968,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MetalSurfaceCreateInfoEXT( VULKAN_HPP_NAMESPACE::MetalSurfaceCreateFlagsEXT flags_ = {}, const CAMetalLayer * pLayer_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pLayer( pLayer_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pLayer{ pLayer_ } { } @@ -52606,18 +53087,18 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR triangleArray_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize triangleArrayStride_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , flags( flags_ ) - , mode( mode_ ) - , dstMicromap( dstMicromap_ ) - , usageCountsCount( usageCountsCount_ ) - , pUsageCounts( pUsageCounts_ ) - , ppUsageCounts( ppUsageCounts_ ) - , data( data_ ) - , scratchData( scratchData_ ) - , triangleArray( triangleArray_ ) - , triangleArrayStride( triangleArrayStride_ ) + : pNext{ pNext_ } + , type{ type_ } + , flags{ flags_ } + , mode{ mode_ } + , dstMicromap{ dstMicromap_ } + , usageCountsCount{ usageCountsCount_ } + , pUsageCounts{ pUsageCounts_ } + , ppUsageCounts{ ppUsageCounts_ } + , data{ data_ } + , scratchData{ scratchData_ } + , triangleArray{ triangleArray_ } + , triangleArrayStride{ triangleArrayStride_ } { } @@ -52837,10 +53318,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ = {}, VULKAN_HPP_NAMESPACE::Bool32 discardable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , micromapSize( micromapSize_ ) - , buildScratchSize( buildScratchSize_ ) - , discardable( discardable_ ) + : pNext{ pNext_ } + , micromapSize{ micromapSize_ } + , buildScratchSize{ buildScratchSize_ } + , discardable{ discardable_ } { } @@ -52960,13 +53441,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::MicromapTypeEXT type_ = VULKAN_HPP_NAMESPACE::MicromapTypeEXT::eOpacityMicromap, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , createFlags( createFlags_ ) - , buffer( buffer_ ) - , offset( offset_ ) - , size( size_ ) - , type( type_ ) - , deviceAddress( deviceAddress_ ) + : pNext{ pNext_ } + , createFlags{ createFlags_ } + , buffer{ buffer_ } + , offset{ offset_ } + , size{ size_ } + , type{ type_ } + , deviceAddress{ deviceAddress_ } { } @@ -53101,9 +53582,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR 
MicromapTriangleEXT( uint32_t dataOffset_ = {}, uint16_t subdivisionLevel_ = {}, uint16_t format_ = {} ) VULKAN_HPP_NOEXCEPT - : dataOffset( dataOffset_ ) - , subdivisionLevel( subdivisionLevel_ ) - , format( format_ ) + : dataOffset{ dataOffset_ } + , subdivisionLevel{ subdivisionLevel_ } + , format{ format_ } { } @@ -53197,8 +53678,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MicromapVersionInfoEXT( const uint8_t * pVersionData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pVersionData( pVersionData_ ) + : pNext{ pNext_ } + , pVersionData{ pVersionData_ } { } @@ -53290,9 +53771,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MultiDrawIndexedInfoEXT( uint32_t firstIndex_ = {}, uint32_t indexCount_ = {}, int32_t vertexOffset_ = {} ) VULKAN_HPP_NOEXCEPT - : firstIndex( firstIndex_ ) - , indexCount( indexCount_ ) - , vertexOffset( vertexOffset_ ) + : firstIndex{ firstIndex_ } + , indexCount{ indexCount_ } + , vertexOffset{ vertexOffset_ } { } @@ -53384,8 +53865,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MultiDrawInfoEXT( uint32_t firstVertex_ = {}, uint32_t vertexCount_ = {} ) VULKAN_HPP_NOEXCEPT - : firstVertex( firstVertex_ ) - , vertexCount( vertexCount_ ) + : firstVertex{ firstVertex_ } + , vertexCount{ vertexCount_ } { } @@ -53470,8 +53951,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::Extent2D maxSampleLocationGridSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxSampleLocationGridSize( maxSampleLocationGridSize_ ) + : pNext{ pNext_ } + , maxSampleLocationGridSize{ maxSampleLocationGridSize_ } { } @@ -53555,9 +54036,9 @@ namespace VULKAN_HPP_NAMESPACE MultisampledRenderToSingleSampledInfoEXT( VULKAN_HPP_NAMESPACE::Bool32 multisampledRenderToSingleSampledEnable_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , multisampledRenderToSingleSampledEnable( multisampledRenderToSingleSampledEnable_ ) - , rasterizationSamples( rasterizationSamples_ ) + : pNext{ pNext_ } + , multisampledRenderToSingleSampledEnable{ multisampledRenderToSingleSampledEnable_ } + , rasterizationSamples{ rasterizationSamples_ } { } @@ -53667,9 +54148,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MultiviewPerViewAttributesInfoNVX( VULKAN_HPP_NAMESPACE::Bool32 perViewAttributes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 perViewAttributesPositionXOnly_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , perViewAttributes( perViewAttributes_ ) - , perViewAttributesPositionXOnly( perViewAttributesPositionXOnly_ ) + : pNext{ pNext_ } + , perViewAttributes{ perViewAttributes_ } + , perViewAttributesPositionXOnly{ perViewAttributesPositionXOnly_ } { } @@ -53775,9 +54256,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM( uint32_t perViewRenderAreaCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pPerViewRenderAreas_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , perViewRenderAreaCount( perViewRenderAreaCount_ ) - , pPerViewRenderAreas( pPerViewRenderAreas_ ) + : pNext{ pNext_ } + , 
perViewRenderAreaCount{ perViewRenderAreaCount_ } + , pPerViewRenderAreas{ pPerViewRenderAreas_ } { } @@ -53900,8 +54381,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR MutableDescriptorTypeListEXT( uint32_t descriptorTypeCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorType * pDescriptorTypes_ = {} ) VULKAN_HPP_NOEXCEPT - : descriptorTypeCount( descriptorTypeCount_ ) - , pDescriptorTypes( pDescriptorTypes_ ) + : descriptorTypeCount{ descriptorTypeCount_ } + , pDescriptorTypes{ pDescriptorTypes_ } { } @@ -54011,9 +54492,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR MutableDescriptorTypeCreateInfoEXT( uint32_t mutableDescriptorTypeListCount_ = {}, const VULKAN_HPP_NAMESPACE::MutableDescriptorTypeListEXT * pMutableDescriptorTypeLists_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , mutableDescriptorTypeListCount( mutableDescriptorTypeListCount_ ) - , pMutableDescriptorTypeLists( pMutableDescriptorTypeLists_ ) + : pNext{ pNext_ } + , mutableDescriptorTypeListCount{ mutableDescriptorTypeListCount_ } + , pMutableDescriptorTypeLists{ pMutableDescriptorTypeLists_ } { } @@ -54146,8 +54627,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR OpaqueCaptureDescriptorDataCreateInfoEXT( const void * opaqueCaptureDescriptorData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , opaqueCaptureDescriptorData( opaqueCaptureDescriptorData_ ) + : pNext{ pNext_ } + , opaqueCaptureDescriptorData{ opaqueCaptureDescriptorData_ } { } @@ -54246,10 +54727,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pRegions_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -54382,8 +54863,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR OpticalFlowImageFormatInfoNV( VULKAN_HPP_NAMESPACE::OpticalFlowUsageFlagsNV usage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , usage( usage_ ) + : pNext{ pNext_ } + , usage{ usage_ } { } @@ -54479,8 +54960,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR OpticalFlowImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) + : pNext{ pNext_ } + , format{ format_ } { } @@ -54571,16 +55052,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::OpticalFlowPerformanceLevelNV performanceLevel_ = VULKAN_HPP_NAMESPACE::OpticalFlowPerformanceLevelNV::eUnknown, VULKAN_HPP_NAMESPACE::OpticalFlowSessionCreateFlagsNV flags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , width( width_ ) - , height( height_ ) - , imageFormat( imageFormat_ ) - , flowVectorFormat( flowVectorFormat_ ) - , costFormat( costFormat_ ) - , outputGridSize( outputGridSize_ ) - , hintGridSize( hintGridSize_ ) - , performanceLevel( performanceLevel_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , width{ width_ } + , height{ height_ } + , imageFormat{ imageFormat_ } + , flowVectorFormat{ flowVectorFormat_ } + , costFormat{ costFormat_ } + , outputGridSize{ outputGridSize_ } + 
, hintGridSize{ hintGridSize_ } + , performanceLevel{ performanceLevel_ } + , flags{ flags_ } { } @@ -54749,10 +55230,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t size_ = {}, const void * pPrivateData_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , id( id_ ) - , size( size_ ) - , pPrivateData( pPrivateData_ ) + : pNext{ pNext_ } + , id{ id_ } + , size{ size_ } + , pPrivateData{ pPrivateData_ } { } @@ -54862,8 +55343,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR OutOfBandQueueTypeInfoNV( VULKAN_HPP_NAMESPACE::OutOfBandQueueTypeNV queueType_ = VULKAN_HPP_NAMESPACE::OutOfBandQueueTypeNV::eRender, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queueType( queueType_ ) + : pNext{ pNext_ } + , queueType{ queueType_ } { } @@ -54959,11 +55440,11 @@ namespace VULKAN_HPP_NAMESPACE uint64_t actualPresentTime_ = {}, uint64_t earliestPresentTime_ = {}, uint64_t presentMargin_ = {} ) VULKAN_HPP_NOEXCEPT - : presentID( presentID_ ) - , desiredPresentTime( desiredPresentTime_ ) - , actualPresentTime( actualPresentTime_ ) - , earliestPresentTime( earliestPresentTime_ ) - , presentMargin( presentMargin_ ) + : presentID{ presentID_ } + , desiredPresentTime{ desiredPresentTime_ } + , actualPresentTime{ actualPresentTime_ } + , earliestPresentTime{ earliestPresentTime_ } + , presentMargin{ presentMargin_ } { } @@ -55044,8 +55525,8 @@ namespace VULKAN_HPP_NAMESPACE PerformanceConfigurationAcquireInfoINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL type_ = VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL::eCommandQueueMetricsDiscoveryActivated, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) + : pNext{ pNext_ } + , type{ type_ } { } @@ -55146,11 +55627,11 @@ namespace VULKAN_HPP_NAMESPACE std::array const & category_ = {}, std::array const & description_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , name( name_ ) - , category( category_ ) - , description( description_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , name{ name_ } + , category{ category_ } + , description{ description_ } { } @@ -55161,37 +55642,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PerformanceCounterDescriptionKHR( VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionFlagsKHR flags_, - std::string const & name_, - std::string const & category_ = {}, - std::string const & description_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ), flags( flags_ ) - { - VULKAN_HPP_ASSERT( name_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( name, VK_MAX_DESCRIPTION_SIZE, name_.data(), name_.size() ); -# else - strncpy( name, name_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, name_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( category_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( category, VK_MAX_DESCRIPTION_SIZE, category_.data(), category_.size() ); -# else - strncpy( category, category_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, category_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - 
PerformanceCounterDescriptionKHR & operator=( PerformanceCounterDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -55288,11 +55738,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR storage_ = VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR::eInt32, std::array const & uuid_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , unit( unit_ ) - , scope( scope_ ) - , storage( storage_ ) - , uuid( uuid_ ) + : pNext{ pNext_ } + , unit{ unit_ } + , scope{ scope_ } + , storage{ storage_ } + , uuid{ uuid_ } { } @@ -55456,8 +55906,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PerformanceMarkerInfoINTEL( uint64_t marker_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , marker( marker_ ) + : pNext{ pNext_ } + , marker{ marker_ } { } @@ -55556,10 +56006,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 enable_ = {}, uint64_t parameter_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , enable( enable_ ) - , parameter( parameter_ ) + : pNext{ pNext_ } + , type{ type_ } + , enable{ enable_ } + , parameter{ parameter_ } { } @@ -55672,8 +56122,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PerformanceQuerySubmitInfoKHR( uint32_t counterPassIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , counterPassIndex( counterPassIndex_ ) + : pNext{ pNext_ } + , counterPassIndex{ counterPassIndex_ } { } @@ -55768,8 +56218,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PerformanceStreamMarkerInfoINTEL( uint32_t marker_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , marker( marker_ ) + : pNext{ pNext_ } + , marker{ marker_ } { } @@ -55934,8 +56384,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 PerformanceValueINTEL( VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL type_ = VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL::eUint32, VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL data_ = {} ) VULKAN_HPP_NOEXCEPT - : type( type_ ) - , data( data_ ) + : type{ type_ } + , data{ data_ } { } @@ -55995,11 +56445,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , storageBuffer16BitAccess( storageBuffer16BitAccess_ ) - , uniformAndStorageBuffer16BitAccess( uniformAndStorageBuffer16BitAccess_ ) - , storagePushConstant16( storagePushConstant16_ ) - , storageInputOutput16( storageInputOutput16_ ) + : pNext{ pNext_ } + , storageBuffer16BitAccess{ storageBuffer16BitAccess_ } + , uniformAndStorageBuffer16BitAccess{ uniformAndStorageBuffer16BitAccess_ } + , storagePushConstant16{ storagePushConstant16_ } + , storageInputOutput16{ storageInputOutput16_ } { } @@ -56130,9 +56580,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDevice4444FormatsFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 formatA4R4G4B4_ = {}, VULKAN_HPP_NAMESPACE::Bool32 formatA4B4G4R4_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , formatA4R4G4B4( formatA4R4G4B4_ ) - , formatA4B4G4R4( formatA4B4G4R4_ ) + : pNext{ pNext_ } + , formatA4R4G4B4{ formatA4R4G4B4_ } + , formatA4B4G4R4{ 
formatA4B4G4R4_ } { } @@ -56237,10 +56687,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , storageBuffer8BitAccess( storageBuffer8BitAccess_ ) - , uniformAndStorageBuffer8BitAccess( uniformAndStorageBuffer8BitAccess_ ) - , storagePushConstant8( storagePushConstant8_ ) + : pNext{ pNext_ } + , storageBuffer8BitAccess{ storageBuffer8BitAccess_ } + , uniformAndStorageBuffer8BitAccess{ uniformAndStorageBuffer8BitAccess_ } + , storagePushConstant8{ storagePushConstant8_ } { } @@ -56360,8 +56810,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceASTCDecodeFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 decodeModeSharedExponent_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , decodeModeSharedExponent( decodeModeSharedExponent_ ) + : pNext{ pNext_ } + , decodeModeSharedExponent{ decodeModeSharedExponent_ } { } @@ -56463,12 +56913,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructure( accelerationStructure_ ) - , accelerationStructureCaptureReplay( accelerationStructureCaptureReplay_ ) - , accelerationStructureIndirectBuild( accelerationStructureIndirectBuild_ ) - , accelerationStructureHostCommands( accelerationStructureHostCommands_ ) - , descriptorBindingAccelerationStructureUpdateAfterBind( descriptorBindingAccelerationStructureUpdateAfterBind_ ) + : pNext{ pNext_ } + , accelerationStructure{ accelerationStructure_ } + , accelerationStructureCaptureReplay{ accelerationStructureCaptureReplay_ } + , accelerationStructureIndirectBuild{ accelerationStructureIndirectBuild_ } + , accelerationStructureHostCommands{ accelerationStructureHostCommands_ } + , descriptorBindingAccelerationStructureUpdateAfterBind{ descriptorBindingAccelerationStructureUpdateAfterBind_ } { } @@ -56621,15 +57071,15 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures_ = {}, uint32_t minAccelerationStructureScratchOffsetAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxGeometryCount( maxGeometryCount_ ) - , maxInstanceCount( maxInstanceCount_ ) - , maxPrimitiveCount( maxPrimitiveCount_ ) - , maxPerStageDescriptorAccelerationStructures( maxPerStageDescriptorAccelerationStructures_ ) - , maxPerStageDescriptorUpdateAfterBindAccelerationStructures( maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ ) - , maxDescriptorSetAccelerationStructures( maxDescriptorSetAccelerationStructures_ ) - , maxDescriptorSetUpdateAfterBindAccelerationStructures( maxDescriptorSetUpdateAfterBindAccelerationStructures_ ) - , minAccelerationStructureScratchOffsetAlignment( minAccelerationStructureScratchOffsetAlignment_ ) + : pNext{ pNext_ } + , maxGeometryCount{ maxGeometryCount_ } + , maxInstanceCount{ maxInstanceCount_ } + , maxPrimitiveCount{ maxPrimitiveCount_ } + , maxPerStageDescriptorAccelerationStructures{ maxPerStageDescriptorAccelerationStructures_ } + , maxPerStageDescriptorUpdateAfterBindAccelerationStructures{ maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ } + , maxDescriptorSetAccelerationStructures{ 
maxDescriptorSetAccelerationStructures_ } + , maxDescriptorSetUpdateAfterBindAccelerationStructures{ maxDescriptorSetUpdateAfterBindAccelerationStructures_ } + , minAccelerationStructureScratchOffsetAlignment{ minAccelerationStructureScratchOffsetAlignment_ } { } @@ -56743,8 +57193,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceAddressBindingReportFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 reportAddressBinding_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , reportAddressBinding( reportAddressBinding_ ) + : pNext{ pNext_ } + , reportAddressBinding{ reportAddressBinding_ } { } @@ -56842,8 +57292,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceAmigoProfilingFeaturesSEC( VULKAN_HPP_NAMESPACE::Bool32 amigoProfiling_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , amigoProfiling( amigoProfiling_ ) + : pNext{ pNext_ } + , amigoProfiling{ amigoProfiling_ } { } @@ -56929,6 +57379,102 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceAmigoProfilingFeaturesSEC; }; + struct PhysicalDeviceAntiLagFeaturesAMD + { + using NativeType = VkPhysicalDeviceAntiLagFeaturesAMD; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAntiLagFeaturesAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceAntiLagFeaturesAMD( VULKAN_HPP_NAMESPACE::Bool32 antiLag_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , antiLag{ antiLag_ } + { + } + + VULKAN_HPP_CONSTEXPR PhysicalDeviceAntiLagFeaturesAMD( PhysicalDeviceAntiLagFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceAntiLagFeaturesAMD( VkPhysicalDeviceAntiLagFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceAntiLagFeaturesAMD( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceAntiLagFeaturesAMD & operator=( PhysicalDeviceAntiLagFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceAntiLagFeaturesAMD & operator=( VkPhysicalDeviceAntiLagFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceAntiLagFeaturesAMD & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceAntiLagFeaturesAMD & setAntiLag( VULKAN_HPP_NAMESPACE::Bool32 antiLag_ ) VULKAN_HPP_NOEXCEPT + { + antiLag = antiLag_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPhysicalDeviceAntiLagFeaturesAMD const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceAntiLagFeaturesAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, antiLag ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceAntiLagFeaturesAMD const & ) const = default; +#else + bool operator==( PhysicalDeviceAntiLagFeaturesAMD const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == 
rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( antiLag == rhs.antiLag ); +# endif + } + + bool operator!=( PhysicalDeviceAntiLagFeaturesAMD const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAntiLagFeaturesAMD; + void * pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 antiLag = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceAntiLagFeaturesAMD; + }; + struct PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { using NativeType = VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT; @@ -56939,8 +57485,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 attachmentFeedbackLoopDynamicState_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachmentFeedbackLoopDynamicState( attachmentFeedbackLoopDynamicState_ ) + : pNext{ pNext_ } + , attachmentFeedbackLoopDynamicState{ attachmentFeedbackLoopDynamicState_ } { } @@ -57041,8 +57587,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 attachmentFeedbackLoopLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachmentFeedbackLoopLayout( attachmentFeedbackLoopLayout_ ) + : pNext{ pNext_ } + , attachmentFeedbackLoopLayout{ attachmentFeedbackLoopLayout_ } { } @@ -57141,8 +57687,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceBlendOperationAdvancedFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCoherentOperations_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , advancedBlendCoherentOperations( advancedBlendCoherentOperations_ ) + : pNext{ pNext_ } + , advancedBlendCoherentOperations{ advancedBlendCoherentOperations_ } { } @@ -57245,13 +57791,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCorrelatedOverlap_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendAllOperations_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , advancedBlendMaxColorAttachments( advancedBlendMaxColorAttachments_ ) - , advancedBlendIndependentBlend( advancedBlendIndependentBlend_ ) - , advancedBlendNonPremultipliedSrcColor( advancedBlendNonPremultipliedSrcColor_ ) - , advancedBlendNonPremultipliedDstColor( advancedBlendNonPremultipliedDstColor_ ) - , advancedBlendCorrelatedOverlap( advancedBlendCorrelatedOverlap_ ) - , advancedBlendAllOperations( advancedBlendAllOperations_ ) + : pNext{ pNext_ } + , advancedBlendMaxColorAttachments{ advancedBlendMaxColorAttachments_ } + , advancedBlendIndependentBlend{ advancedBlendIndependentBlend_ } + , advancedBlendNonPremultipliedSrcColor{ advancedBlendNonPremultipliedSrcColor_ } + , advancedBlendNonPremultipliedDstColor{ advancedBlendNonPremultipliedDstColor_ } + , advancedBlendCorrelatedOverlap{ advancedBlendCorrelatedOverlap_ } + , advancedBlendAllOperations{ advancedBlendAllOperations_ } { } @@ -57359,9 +57905,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceBorderColorSwizzleFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 borderColorSwizzle_ = {}, VULKAN_HPP_NAMESPACE::Bool32 borderColorSwizzleFromImage_ = {}, void * pNext_ = 
nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , borderColorSwizzle( borderColorSwizzle_ ) - , borderColorSwizzleFromImage( borderColorSwizzleFromImage_ ) + : pNext{ pNext_ } + , borderColorSwizzle{ borderColorSwizzle_ } + , borderColorSwizzleFromImage{ borderColorSwizzleFromImage_ } { } @@ -57469,10 +58015,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , bufferDeviceAddress( bufferDeviceAddress_ ) - , bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ) - , bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ) + : pNext{ pNext_ } + , bufferDeviceAddress{ bufferDeviceAddress_ } + , bufferDeviceAddressCaptureReplay{ bufferDeviceAddressCaptureReplay_ } + , bufferDeviceAddressMultiDevice{ bufferDeviceAddressMultiDevice_ } { } @@ -57595,10 +58141,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , bufferDeviceAddress( bufferDeviceAddress_ ) - , bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ) - , bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ) + : pNext{ pNext_ } + , bufferDeviceAddress{ bufferDeviceAddress_ } + , bufferDeviceAddressCaptureReplay{ bufferDeviceAddressCaptureReplay_ } + , bufferDeviceAddressMultiDevice{ bufferDeviceAddressMultiDevice_ } { } @@ -57720,9 +58266,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceClusterCullingShaderFeaturesHUAWEI( VULKAN_HPP_NAMESPACE::Bool32 clustercullingShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiviewClusterCullingShader_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , clustercullingShader( clustercullingShader_ ) - , multiviewClusterCullingShader( multiviewClusterCullingShader_ ) + : pNext{ pNext_ } + , clustercullingShader{ clustercullingShader_ } + , multiviewClusterCullingShader{ multiviewClusterCullingShader_ } { } @@ -57832,11 +58378,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxOutputClusterCount_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize indirectBufferOffsetAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxWorkGroupCount( maxWorkGroupCount_ ) - , maxWorkGroupSize( maxWorkGroupSize_ ) - , maxOutputClusterCount( maxOutputClusterCount_ ) - , indirectBufferOffsetAlignment( indirectBufferOffsetAlignment_ ) + : pNext{ pNext_ } + , maxWorkGroupCount{ maxWorkGroupCount_ } + , maxWorkGroupSize{ maxWorkGroupSize_ } + , maxOutputClusterCount{ maxOutputClusterCount_ } + , indirectBufferOffsetAlignment{ indirectBufferOffsetAlignment_ } { } @@ -57930,8 +58476,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI( VULKAN_HPP_NAMESPACE::Bool32 clusterShadingRate_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , clusterShadingRate( clusterShadingRate_ ) + : pNext{ pNext_ } + , clusterShadingRate{ clusterShadingRate_ } { } @@ -58030,8 +58576,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCoherentMemoryFeaturesAMD( VULKAN_HPP_NAMESPACE::Bool32 deviceCoherentMemory_ = {}, void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceCoherentMemory( deviceCoherentMemory_ ) + : pNext{ pNext_ } + , deviceCoherentMemory{ deviceCoherentMemory_ } { } @@ -58128,8 +58674,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceColorWriteEnableFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 colorWriteEnable_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , colorWriteEnable( colorWriteEnable_ ) + : pNext{ pNext_ } + , colorWriteEnable{ colorWriteEnable_ } { } @@ -58216,55 +58762,155 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceColorWriteEnableFeaturesEXT; }; - struct PhysicalDeviceComputeShaderDerivativesFeaturesNV + struct PhysicalDeviceCommandBufferInheritanceFeaturesNV { - using NativeType = VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + using NativeType = VkPhysicalDeviceCommandBufferInheritanceFeaturesNV; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCommandBufferInheritanceFeaturesNV; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceComputeShaderDerivativesFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear_ = {}, + VULKAN_HPP_CONSTEXPR PhysicalDeviceCommandBufferInheritanceFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 commandBufferInheritance_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , computeDerivativeGroupQuads( computeDerivativeGroupQuads_ ) - , computeDerivativeGroupLinear( computeDerivativeGroupLinear_ ) + : pNext{ pNext_ } + , commandBufferInheritance{ commandBufferInheritance_ } { } VULKAN_HPP_CONSTEXPR - PhysicalDeviceComputeShaderDerivativesFeaturesNV( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceCommandBufferInheritanceFeaturesNV( PhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceComputeShaderDerivativesFeaturesNV( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceComputeShaderDerivativesFeaturesNV( *reinterpret_cast( &rhs ) ) + PhysicalDeviceCommandBufferInheritanceFeaturesNV( VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceCommandBufferInheritanceFeaturesNV( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceComputeShaderDerivativesFeaturesNV & operator=( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceCommandBufferInheritanceFeaturesNV & operator=( PhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceComputeShaderDerivativesFeaturesNV & operator=( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceCommandBufferInheritanceFeaturesNV & operator=( VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesNV & 
setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceCommandBufferInheritanceFeaturesNV & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesNV & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceCommandBufferInheritanceFeaturesNV & + setCommandBufferInheritance( VULKAN_HPP_NAMESPACE::Bool32 commandBufferInheritance_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferInheritance = commandBufferInheritance_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPhysicalDeviceCommandBufferInheritanceFeaturesNV const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceCommandBufferInheritanceFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, commandBufferInheritance ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceCommandBufferInheritanceFeaturesNV const & ) const = default; +#else + bool operator==( PhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( commandBufferInheritance == rhs.commandBufferInheritance ); +# endif + } + + bool operator!=( PhysicalDeviceCommandBufferInheritanceFeaturesNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCommandBufferInheritanceFeaturesNV; + void * pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 commandBufferInheritance = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceCommandBufferInheritanceFeaturesNV; + }; + + struct PhysicalDeviceComputeShaderDerivativesFeaturesKHR + { + using NativeType = VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceComputeShaderDerivativesFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , computeDerivativeGroupQuads{ computeDerivativeGroupQuads_ } + , computeDerivativeGroupLinear{ computeDerivativeGroupLinear_ } + { + } + + VULKAN_HPP_CONSTEXPR + PhysicalDeviceComputeShaderDerivativesFeaturesKHR( PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceComputeShaderDerivativesFeaturesKHR( VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceComputeShaderDerivativesFeaturesKHR( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceComputeShaderDerivativesFeaturesKHR & + operator=( PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceComputeShaderDerivativesFeaturesKHR & operator=( 
VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesKHR & setComputeDerivativeGroupQuads( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads_ ) VULKAN_HPP_NOEXCEPT { computeDerivativeGroupQuads = computeDerivativeGroupQuads_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesNV & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceComputeShaderDerivativesFeaturesKHR & setComputeDerivativeGroupLinear( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear_ ) VULKAN_HPP_NOEXCEPT { computeDerivativeGroupLinear = computeDerivativeGroupLinear_; @@ -58272,14 +58918,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -58295,9 +58941,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & ) const = default; + auto operator<=>( PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & ) const = default; #else - bool operator==( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -58307,23 +58953,110 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceComputeShaderDerivativesFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesKHR; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads = {}; VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceComputeShaderDerivativesFeaturesNV; + using Type = PhysicalDeviceComputeShaderDerivativesFeaturesKHR; + }; + + using PhysicalDeviceComputeShaderDerivativesFeaturesNV = PhysicalDeviceComputeShaderDerivativesFeaturesKHR; + + struct PhysicalDeviceComputeShaderDerivativesPropertiesKHR + { + using NativeType = VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = 
StructureType::ePhysicalDeviceComputeShaderDerivativesPropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceComputeShaderDerivativesPropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 meshAndTaskShaderDerivatives_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , meshAndTaskShaderDerivatives{ meshAndTaskShaderDerivatives_ } + { + } + + VULKAN_HPP_CONSTEXPR + PhysicalDeviceComputeShaderDerivativesPropertiesKHR( PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceComputeShaderDerivativesPropertiesKHR( VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceComputeShaderDerivativesPropertiesKHR( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceComputeShaderDerivativesPropertiesKHR & + operator=( PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceComputeShaderDerivativesPropertiesKHR & operator=( VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + operator VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, meshAndTaskShaderDerivatives ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & ) const = default; +#else + bool operator==( PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( meshAndTaskShaderDerivatives == rhs.meshAndTaskShaderDerivatives ); +# endif + } + + bool operator!=( PhysicalDeviceComputeShaderDerivativesPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceComputeShaderDerivativesPropertiesKHR; + void * pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 meshAndTaskShaderDerivatives = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceComputeShaderDerivativesPropertiesKHR; }; struct PhysicalDeviceConditionalRenderingFeaturesEXT @@ -58337,9 +59070,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceConditionalRenderingFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 conditionalRendering_ = {}, VULKAN_HPP_NAMESPACE::Bool32 inheritedConditionalRendering_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , conditionalRendering( conditionalRendering_ ) - , inheritedConditionalRendering( inheritedConditionalRendering_ ) + : pNext{ pNext_ } + , conditionalRendering{ conditionalRendering_ } + , inheritedConditionalRendering{ inheritedConditionalRendering_ } { } @@ -58454,16 +59187,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 fullyCoveredFragmentShaderInputVariable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 
conservativeRasterizationPostDepthCoverage_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , primitiveOverestimationSize( primitiveOverestimationSize_ ) - , maxExtraPrimitiveOverestimationSize( maxExtraPrimitiveOverestimationSize_ ) - , extraPrimitiveOverestimationSizeGranularity( extraPrimitiveOverestimationSizeGranularity_ ) - , primitiveUnderestimation( primitiveUnderestimation_ ) - , conservativePointAndLineRasterization( conservativePointAndLineRasterization_ ) - , degenerateTrianglesRasterized( degenerateTrianglesRasterized_ ) - , degenerateLinesRasterized( degenerateLinesRasterized_ ) - , fullyCoveredFragmentShaderInputVariable( fullyCoveredFragmentShaderInputVariable_ ) - , conservativeRasterizationPostDepthCoverage( conservativeRasterizationPostDepthCoverage_ ) + : pNext{ pNext_ } + , primitiveOverestimationSize{ primitiveOverestimationSize_ } + , maxExtraPrimitiveOverestimationSize{ maxExtraPrimitiveOverestimationSize_ } + , extraPrimitiveOverestimationSizeGranularity{ extraPrimitiveOverestimationSizeGranularity_ } + , primitiveUnderestimation{ primitiveUnderestimation_ } + , conservativePointAndLineRasterization{ conservativePointAndLineRasterization_ } + , degenerateTrianglesRasterized{ degenerateTrianglesRasterized_ } + , degenerateLinesRasterized{ degenerateLinesRasterized_ } + , fullyCoveredFragmentShaderInputVariable{ fullyCoveredFragmentShaderInputVariable_ } + , conservativeRasterizationPostDepthCoverage{ conservativeRasterizationPostDepthCoverage_ } { } @@ -58583,9 +59316,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrix_ = {}, VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrixRobustBufferAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cooperativeMatrix( cooperativeMatrix_ ) - , cooperativeMatrixRobustBufferAccess( cooperativeMatrixRobustBufferAccess_ ) + : pNext{ pNext_ } + , cooperativeMatrix{ cooperativeMatrix_ } + , cooperativeMatrixRobustBufferAccess{ cooperativeMatrixRobustBufferAccess_ } { } @@ -58692,9 +59425,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrix_ = {}, VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrixRobustBufferAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cooperativeMatrix( cooperativeMatrix_ ) - , cooperativeMatrixRobustBufferAccess( cooperativeMatrixRobustBufferAccess_ ) + : pNext{ pNext_ } + , cooperativeMatrix{ cooperativeMatrix_ } + , cooperativeMatrixRobustBufferAccess{ cooperativeMatrixRobustBufferAccess_ } { } @@ -58800,8 +59533,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixPropertiesKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags cooperativeMatrixSupportedStages_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cooperativeMatrixSupportedStages( cooperativeMatrixSupportedStages_ ) + : pNext{ pNext_ } + , cooperativeMatrixSupportedStages{ cooperativeMatrixSupportedStages_ } { } @@ -58883,8 +59616,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixPropertiesNV( VULKAN_HPP_NAMESPACE::ShaderStageFlags cooperativeMatrixSupportedStages_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cooperativeMatrixSupportedStages( 
cooperativeMatrixSupportedStages_ ) + : pNext{ pNext_ } + , cooperativeMatrixSupportedStages{ cooperativeMatrixSupportedStages_ } { } @@ -58966,8 +59699,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCopyMemoryIndirectFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 indirectCopy_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , indirectCopy( indirectCopy_ ) + : pNext{ pNext_ } + , indirectCopy{ indirectCopy_ } { } @@ -59063,8 +59796,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCopyMemoryIndirectPropertiesNV( VULKAN_HPP_NAMESPACE::QueueFlags supportedQueues_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , supportedQueues( supportedQueues_ ) + : pNext{ pNext_ } + , supportedQueues{ supportedQueues_ } { } @@ -59146,8 +59879,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCornerSampledImageFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 cornerSampledImage_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cornerSampledImage( cornerSampledImage_ ) + : pNext{ pNext_ } + , cornerSampledImage{ cornerSampledImage_ } { } @@ -59244,8 +59977,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCoverageReductionModeFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 coverageReductionMode_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , coverageReductionMode( coverageReductionMode_ ) + : pNext{ pNext_ } + , coverageReductionMode{ coverageReductionMode_ } { } @@ -59342,8 +60075,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCubicClampFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 cubicRangeClamp_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cubicRangeClamp( cubicRangeClamp_ ) + : pNext{ pNext_ } + , cubicRangeClamp{ cubicRangeClamp_ } { } @@ -59439,8 +60172,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCubicWeightsFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 selectableCubicWeights_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , selectableCubicWeights( selectableCubicWeights_ ) + : pNext{ pNext_ } + , selectableCubicWeights{ selectableCubicWeights_ } { } @@ -59538,8 +60271,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCudaKernelLaunchFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 cudaKernelLaunchFeatures_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cudaKernelLaunchFeatures( cudaKernelLaunchFeatures_ ) + : pNext{ pNext_ } + , cudaKernelLaunchFeatures{ cudaKernelLaunchFeatures_ } { } @@ -59639,9 +60372,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceCudaKernelLaunchPropertiesNV( uint32_t computeCapabilityMinor_ = {}, uint32_t computeCapabilityMajor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , computeCapabilityMinor( computeCapabilityMinor_ ) - , computeCapabilityMajor( computeCapabilityMajor_ ) + : pNext{ pNext_ } + , computeCapabilityMinor{ computeCapabilityMinor_ } + , computeCapabilityMajor{ computeCapabilityMajor_ } { } @@ -59727,9 +60460,9 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 customBorderColors_ = {}, VULKAN_HPP_NAMESPACE::Bool32 customBorderColorWithoutFormat_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , customBorderColors( customBorderColors_ ) - , customBorderColorWithoutFormat( customBorderColorWithoutFormat_ ) + : pNext{ pNext_ } + , customBorderColors{ customBorderColors_ } + , customBorderColorWithoutFormat{ customBorderColorWithoutFormat_ } { } @@ -59835,8 +60568,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorPropertiesEXT( uint32_t maxCustomBorderColorSamplers_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxCustomBorderColorSamplers( maxCustomBorderColorSamplers_ ) + : pNext{ pNext_ } + , maxCustomBorderColorSamplers{ maxCustomBorderColorSamplers_ } { } @@ -59918,8 +60651,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocationImageAliasing_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dedicatedAllocationImageAliasing( dedicatedAllocationImageAliasing_ ) + : pNext{ pNext_ } + , dedicatedAllocationImageAliasing{ dedicatedAllocationImageAliasing_ } { } @@ -60022,11 +60755,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 floatRepresentation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthBiasExact_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthBiasControl( depthBiasControl_ ) - , leastRepresentableValueForceUnormRepresentation( leastRepresentableValueForceUnormRepresentation_ ) - , floatRepresentation( floatRepresentation_ ) - , depthBiasExact( depthBiasExact_ ) + : pNext{ pNext_ } + , depthBiasControl{ depthBiasControl_ } + , leastRepresentableValueForceUnormRepresentation{ leastRepresentableValueForceUnormRepresentation_ } + , floatRepresentation{ floatRepresentation_ } + , depthBiasExact{ depthBiasExact_ } { } @@ -60153,8 +60886,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthClampZeroOneFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 depthClampZeroOne_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthClampZeroOne( depthClampZeroOne_ ) + : pNext{ pNext_ } + , depthClampZeroOne{ depthClampZeroOne_ } { } @@ -60251,8 +60984,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthClipControlFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 depthClipControl_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthClipControl( depthClipControl_ ) + : pNext{ pNext_ } + , depthClipControl{ depthClipControl_ } { } @@ -60349,8 +61082,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthClipEnableFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthClipEnable( depthClipEnable_ ) + : pNext{ pNext_ } + , depthClipEnable{ depthClipEnable_ } { } @@ -60449,11 +61182,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentResolve_ = {}, void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , supportedDepthResolveModes( supportedDepthResolveModes_ ) - , supportedStencilResolveModes( supportedStencilResolveModes_ ) - , independentResolveNone( independentResolveNone_ ) - , independentResolve( independentResolve_ ) + : pNext{ pNext_ } + , supportedDepthResolveModes{ supportedDepthResolveModes_ } + , supportedStencilResolveModes{ supportedStencilResolveModes_ } + , independentResolveNone{ independentResolveNone_ } + , independentResolve{ independentResolve_ } { } @@ -60547,8 +61280,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorBufferDensityMapPropertiesEXT( size_t combinedImageSamplerDensityMapDescriptorSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , combinedImageSamplerDensityMapDescriptorSize( combinedImageSamplerDensityMapDescriptorSize_ ) + : pNext{ pNext_ } + , combinedImageSamplerDensityMapDescriptorSize{ combinedImageSamplerDensityMapDescriptorSize_ } { } @@ -60636,11 +61369,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 descriptorBufferImageLayoutIgnored_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBufferPushDescriptors_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorBuffer( descriptorBuffer_ ) - , descriptorBufferCaptureReplay( descriptorBufferCaptureReplay_ ) - , descriptorBufferImageLayoutIgnored( descriptorBufferImageLayoutIgnored_ ) - , descriptorBufferPushDescriptors( descriptorBufferPushDescriptors_ ) + : pNext{ pNext_ } + , descriptorBuffer{ descriptorBuffer_ } + , descriptorBufferCaptureReplay{ descriptorBufferCaptureReplay_ } + , descriptorBufferImageLayoutIgnored{ descriptorBufferImageLayoutIgnored_ } + , descriptorBufferPushDescriptors{ descriptorBufferPushDescriptors_ } { } @@ -60801,40 +61534,40 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize resourceDescriptorBufferAddressSpaceSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize descriptorBufferAddressSpaceSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , combinedImageSamplerDescriptorSingleArray( combinedImageSamplerDescriptorSingleArray_ ) - , bufferlessPushDescriptors( bufferlessPushDescriptors_ ) - , allowSamplerImageViewPostSubmitCreation( allowSamplerImageViewPostSubmitCreation_ ) - , descriptorBufferOffsetAlignment( descriptorBufferOffsetAlignment_ ) - , maxDescriptorBufferBindings( maxDescriptorBufferBindings_ ) - , maxResourceDescriptorBufferBindings( maxResourceDescriptorBufferBindings_ ) - , maxSamplerDescriptorBufferBindings( maxSamplerDescriptorBufferBindings_ ) - , maxEmbeddedImmutableSamplerBindings( maxEmbeddedImmutableSamplerBindings_ ) - , maxEmbeddedImmutableSamplers( maxEmbeddedImmutableSamplers_ ) - , bufferCaptureReplayDescriptorDataSize( bufferCaptureReplayDescriptorDataSize_ ) - , imageCaptureReplayDescriptorDataSize( imageCaptureReplayDescriptorDataSize_ ) - , imageViewCaptureReplayDescriptorDataSize( imageViewCaptureReplayDescriptorDataSize_ ) - , samplerCaptureReplayDescriptorDataSize( samplerCaptureReplayDescriptorDataSize_ ) - , accelerationStructureCaptureReplayDescriptorDataSize( accelerationStructureCaptureReplayDescriptorDataSize_ ) - , samplerDescriptorSize( samplerDescriptorSize_ ) - , combinedImageSamplerDescriptorSize( combinedImageSamplerDescriptorSize_ ) - , sampledImageDescriptorSize( sampledImageDescriptorSize_ ) - , storageImageDescriptorSize( storageImageDescriptorSize_ ) - , 
uniformTexelBufferDescriptorSize( uniformTexelBufferDescriptorSize_ ) - , robustUniformTexelBufferDescriptorSize( robustUniformTexelBufferDescriptorSize_ ) - , storageTexelBufferDescriptorSize( storageTexelBufferDescriptorSize_ ) - , robustStorageTexelBufferDescriptorSize( robustStorageTexelBufferDescriptorSize_ ) - , uniformBufferDescriptorSize( uniformBufferDescriptorSize_ ) - , robustUniformBufferDescriptorSize( robustUniformBufferDescriptorSize_ ) - , storageBufferDescriptorSize( storageBufferDescriptorSize_ ) - , robustStorageBufferDescriptorSize( robustStorageBufferDescriptorSize_ ) - , inputAttachmentDescriptorSize( inputAttachmentDescriptorSize_ ) - , accelerationStructureDescriptorSize( accelerationStructureDescriptorSize_ ) - , maxSamplerDescriptorBufferRange( maxSamplerDescriptorBufferRange_ ) - , maxResourceDescriptorBufferRange( maxResourceDescriptorBufferRange_ ) - , samplerDescriptorBufferAddressSpaceSize( samplerDescriptorBufferAddressSpaceSize_ ) - , resourceDescriptorBufferAddressSpaceSize( resourceDescriptorBufferAddressSpaceSize_ ) - , descriptorBufferAddressSpaceSize( descriptorBufferAddressSpaceSize_ ) + : pNext{ pNext_ } + , combinedImageSamplerDescriptorSingleArray{ combinedImageSamplerDescriptorSingleArray_ } + , bufferlessPushDescriptors{ bufferlessPushDescriptors_ } + , allowSamplerImageViewPostSubmitCreation{ allowSamplerImageViewPostSubmitCreation_ } + , descriptorBufferOffsetAlignment{ descriptorBufferOffsetAlignment_ } + , maxDescriptorBufferBindings{ maxDescriptorBufferBindings_ } + , maxResourceDescriptorBufferBindings{ maxResourceDescriptorBufferBindings_ } + , maxSamplerDescriptorBufferBindings{ maxSamplerDescriptorBufferBindings_ } + , maxEmbeddedImmutableSamplerBindings{ maxEmbeddedImmutableSamplerBindings_ } + , maxEmbeddedImmutableSamplers{ maxEmbeddedImmutableSamplers_ } + , bufferCaptureReplayDescriptorDataSize{ bufferCaptureReplayDescriptorDataSize_ } + , imageCaptureReplayDescriptorDataSize{ imageCaptureReplayDescriptorDataSize_ } + , imageViewCaptureReplayDescriptorDataSize{ imageViewCaptureReplayDescriptorDataSize_ } + , samplerCaptureReplayDescriptorDataSize{ samplerCaptureReplayDescriptorDataSize_ } + , accelerationStructureCaptureReplayDescriptorDataSize{ accelerationStructureCaptureReplayDescriptorDataSize_ } + , samplerDescriptorSize{ samplerDescriptorSize_ } + , combinedImageSamplerDescriptorSize{ combinedImageSamplerDescriptorSize_ } + , sampledImageDescriptorSize{ sampledImageDescriptorSize_ } + , storageImageDescriptorSize{ storageImageDescriptorSize_ } + , uniformTexelBufferDescriptorSize{ uniformTexelBufferDescriptorSize_ } + , robustUniformTexelBufferDescriptorSize{ robustUniformTexelBufferDescriptorSize_ } + , storageTexelBufferDescriptorSize{ storageTexelBufferDescriptorSize_ } + , robustStorageTexelBufferDescriptorSize{ robustStorageTexelBufferDescriptorSize_ } + , uniformBufferDescriptorSize{ uniformBufferDescriptorSize_ } + , robustUniformBufferDescriptorSize{ robustUniformBufferDescriptorSize_ } + , storageBufferDescriptorSize{ storageBufferDescriptorSize_ } + , robustStorageBufferDescriptorSize{ robustStorageBufferDescriptorSize_ } + , inputAttachmentDescriptorSize{ inputAttachmentDescriptorSize_ } + , accelerationStructureDescriptorSize{ accelerationStructureDescriptorSize_ } + , maxSamplerDescriptorBufferRange{ maxSamplerDescriptorBufferRange_ } + , maxResourceDescriptorBufferRange{ maxResourceDescriptorBufferRange_ } + , samplerDescriptorBufferAddressSpaceSize{ samplerDescriptorBufferAddressSpaceSize_ } + , 
resourceDescriptorBufferAddressSpaceSize{ resourceDescriptorBufferAddressSpaceSize_ } + , descriptorBufferAddressSpaceSize{ descriptorBufferAddressSpaceSize_ } { } @@ -61065,27 +61798,27 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount_ = {}, VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderInputAttachmentArrayDynamicIndexing( shaderInputAttachmentArrayDynamicIndexing_ ) - , shaderUniformTexelBufferArrayDynamicIndexing( shaderUniformTexelBufferArrayDynamicIndexing_ ) - , shaderStorageTexelBufferArrayDynamicIndexing( shaderStorageTexelBufferArrayDynamicIndexing_ ) - , shaderUniformBufferArrayNonUniformIndexing( shaderUniformBufferArrayNonUniformIndexing_ ) - , shaderSampledImageArrayNonUniformIndexing( shaderSampledImageArrayNonUniformIndexing_ ) - , shaderStorageBufferArrayNonUniformIndexing( shaderStorageBufferArrayNonUniformIndexing_ ) - , shaderStorageImageArrayNonUniformIndexing( shaderStorageImageArrayNonUniformIndexing_ ) - , shaderInputAttachmentArrayNonUniformIndexing( shaderInputAttachmentArrayNonUniformIndexing_ ) - , shaderUniformTexelBufferArrayNonUniformIndexing( shaderUniformTexelBufferArrayNonUniformIndexing_ ) - , shaderStorageTexelBufferArrayNonUniformIndexing( shaderStorageTexelBufferArrayNonUniformIndexing_ ) - , descriptorBindingUniformBufferUpdateAfterBind( descriptorBindingUniformBufferUpdateAfterBind_ ) - , descriptorBindingSampledImageUpdateAfterBind( descriptorBindingSampledImageUpdateAfterBind_ ) - , descriptorBindingStorageImageUpdateAfterBind( descriptorBindingStorageImageUpdateAfterBind_ ) - , descriptorBindingStorageBufferUpdateAfterBind( descriptorBindingStorageBufferUpdateAfterBind_ ) - , descriptorBindingUniformTexelBufferUpdateAfterBind( descriptorBindingUniformTexelBufferUpdateAfterBind_ ) - , descriptorBindingStorageTexelBufferUpdateAfterBind( descriptorBindingStorageTexelBufferUpdateAfterBind_ ) - , descriptorBindingUpdateUnusedWhilePending( descriptorBindingUpdateUnusedWhilePending_ ) - , descriptorBindingPartiallyBound( descriptorBindingPartiallyBound_ ) - , descriptorBindingVariableDescriptorCount( descriptorBindingVariableDescriptorCount_ ) - , runtimeDescriptorArray( runtimeDescriptorArray_ ) + : pNext{ pNext_ } + , shaderInputAttachmentArrayDynamicIndexing{ shaderInputAttachmentArrayDynamicIndexing_ } + , shaderUniformTexelBufferArrayDynamicIndexing{ shaderUniformTexelBufferArrayDynamicIndexing_ } + , shaderStorageTexelBufferArrayDynamicIndexing{ shaderStorageTexelBufferArrayDynamicIndexing_ } + , shaderUniformBufferArrayNonUniformIndexing{ shaderUniformBufferArrayNonUniformIndexing_ } + , shaderSampledImageArrayNonUniformIndexing{ shaderSampledImageArrayNonUniformIndexing_ } + , shaderStorageBufferArrayNonUniformIndexing{ shaderStorageBufferArrayNonUniformIndexing_ } + , shaderStorageImageArrayNonUniformIndexing{ shaderStorageImageArrayNonUniformIndexing_ } + , shaderInputAttachmentArrayNonUniformIndexing{ shaderInputAttachmentArrayNonUniformIndexing_ } + , shaderUniformTexelBufferArrayNonUniformIndexing{ shaderUniformTexelBufferArrayNonUniformIndexing_ } + , shaderStorageTexelBufferArrayNonUniformIndexing{ shaderStorageTexelBufferArrayNonUniformIndexing_ } + , descriptorBindingUniformBufferUpdateAfterBind{ descriptorBindingUniformBufferUpdateAfterBind_ } + , descriptorBindingSampledImageUpdateAfterBind{ descriptorBindingSampledImageUpdateAfterBind_ } + , descriptorBindingStorageImageUpdateAfterBind{ 
descriptorBindingStorageImageUpdateAfterBind_ } + , descriptorBindingStorageBufferUpdateAfterBind{ descriptorBindingStorageBufferUpdateAfterBind_ } + , descriptorBindingUniformTexelBufferUpdateAfterBind{ descriptorBindingUniformTexelBufferUpdateAfterBind_ } + , descriptorBindingStorageTexelBufferUpdateAfterBind{ descriptorBindingStorageTexelBufferUpdateAfterBind_ } + , descriptorBindingUpdateUnusedWhilePending{ descriptorBindingUpdateUnusedWhilePending_ } + , descriptorBindingPartiallyBound{ descriptorBindingPartiallyBound_ } + , descriptorBindingVariableDescriptorCount{ descriptorBindingVariableDescriptorCount_ } + , runtimeDescriptorArray{ runtimeDescriptorArray_ } { } @@ -61420,30 +62153,30 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxDescriptorSetUpdateAfterBindStorageImages_ = {}, uint32_t maxDescriptorSetUpdateAfterBindInputAttachments_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxUpdateAfterBindDescriptorsInAllPools( maxUpdateAfterBindDescriptorsInAllPools_ ) - , shaderUniformBufferArrayNonUniformIndexingNative( shaderUniformBufferArrayNonUniformIndexingNative_ ) - , shaderSampledImageArrayNonUniformIndexingNative( shaderSampledImageArrayNonUniformIndexingNative_ ) - , shaderStorageBufferArrayNonUniformIndexingNative( shaderStorageBufferArrayNonUniformIndexingNative_ ) - , shaderStorageImageArrayNonUniformIndexingNative( shaderStorageImageArrayNonUniformIndexingNative_ ) - , shaderInputAttachmentArrayNonUniformIndexingNative( shaderInputAttachmentArrayNonUniformIndexingNative_ ) - , robustBufferAccessUpdateAfterBind( robustBufferAccessUpdateAfterBind_ ) - , quadDivergentImplicitLod( quadDivergentImplicitLod_ ) - , maxPerStageDescriptorUpdateAfterBindSamplers( maxPerStageDescriptorUpdateAfterBindSamplers_ ) - , maxPerStageDescriptorUpdateAfterBindUniformBuffers( maxPerStageDescriptorUpdateAfterBindUniformBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindStorageBuffers( maxPerStageDescriptorUpdateAfterBindStorageBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindSampledImages( maxPerStageDescriptorUpdateAfterBindSampledImages_ ) - , maxPerStageDescriptorUpdateAfterBindStorageImages( maxPerStageDescriptorUpdateAfterBindStorageImages_ ) - , maxPerStageDescriptorUpdateAfterBindInputAttachments( maxPerStageDescriptorUpdateAfterBindInputAttachments_ ) - , maxPerStageUpdateAfterBindResources( maxPerStageUpdateAfterBindResources_ ) - , maxDescriptorSetUpdateAfterBindSamplers( maxDescriptorSetUpdateAfterBindSamplers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffers( maxDescriptorSetUpdateAfterBindUniformBuffers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffersDynamic( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffers( maxDescriptorSetUpdateAfterBindStorageBuffers_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffersDynamic( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindSampledImages( maxDescriptorSetUpdateAfterBindSampledImages_ ) - , maxDescriptorSetUpdateAfterBindStorageImages( maxDescriptorSetUpdateAfterBindStorageImages_ ) - , maxDescriptorSetUpdateAfterBindInputAttachments( maxDescriptorSetUpdateAfterBindInputAttachments_ ) + : pNext{ pNext_ } + , maxUpdateAfterBindDescriptorsInAllPools{ maxUpdateAfterBindDescriptorsInAllPools_ } + , shaderUniformBufferArrayNonUniformIndexingNative{ shaderUniformBufferArrayNonUniformIndexingNative_ } + , shaderSampledImageArrayNonUniformIndexingNative{ shaderSampledImageArrayNonUniformIndexingNative_ } + , 
shaderStorageBufferArrayNonUniformIndexingNative{ shaderStorageBufferArrayNonUniformIndexingNative_ } + , shaderStorageImageArrayNonUniformIndexingNative{ shaderStorageImageArrayNonUniformIndexingNative_ } + , shaderInputAttachmentArrayNonUniformIndexingNative{ shaderInputAttachmentArrayNonUniformIndexingNative_ } + , robustBufferAccessUpdateAfterBind{ robustBufferAccessUpdateAfterBind_ } + , quadDivergentImplicitLod{ quadDivergentImplicitLod_ } + , maxPerStageDescriptorUpdateAfterBindSamplers{ maxPerStageDescriptorUpdateAfterBindSamplers_ } + , maxPerStageDescriptorUpdateAfterBindUniformBuffers{ maxPerStageDescriptorUpdateAfterBindUniformBuffers_ } + , maxPerStageDescriptorUpdateAfterBindStorageBuffers{ maxPerStageDescriptorUpdateAfterBindStorageBuffers_ } + , maxPerStageDescriptorUpdateAfterBindSampledImages{ maxPerStageDescriptorUpdateAfterBindSampledImages_ } + , maxPerStageDescriptorUpdateAfterBindStorageImages{ maxPerStageDescriptorUpdateAfterBindStorageImages_ } + , maxPerStageDescriptorUpdateAfterBindInputAttachments{ maxPerStageDescriptorUpdateAfterBindInputAttachments_ } + , maxPerStageUpdateAfterBindResources{ maxPerStageUpdateAfterBindResources_ } + , maxDescriptorSetUpdateAfterBindSamplers{ maxDescriptorSetUpdateAfterBindSamplers_ } + , maxDescriptorSetUpdateAfterBindUniformBuffers{ maxDescriptorSetUpdateAfterBindUniformBuffers_ } + , maxDescriptorSetUpdateAfterBindUniformBuffersDynamic{ maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ } + , maxDescriptorSetUpdateAfterBindStorageBuffers{ maxDescriptorSetUpdateAfterBindStorageBuffers_ } + , maxDescriptorSetUpdateAfterBindStorageBuffersDynamic{ maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ } + , maxDescriptorSetUpdateAfterBindSampledImages{ maxDescriptorSetUpdateAfterBindSampledImages_ } + , maxDescriptorSetUpdateAfterBindStorageImages{ maxDescriptorSetUpdateAfterBindStorageImages_ } + , maxDescriptorSetUpdateAfterBindInputAttachments{ maxDescriptorSetUpdateAfterBindInputAttachments_ } { } @@ -61618,8 +62351,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorPoolOverallocationFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 descriptorPoolOverallocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorPoolOverallocation( descriptorPoolOverallocation_ ) + : pNext{ pNext_ } + , descriptorPoolOverallocation{ descriptorPoolOverallocation_ } { } @@ -61718,8 +62451,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorSetHostMappingFeaturesVALVE( VULKAN_HPP_NAMESPACE::Bool32 descriptorSetHostMapping_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorSetHostMapping( descriptorSetHostMapping_ ) + : pNext{ pNext_ } + , descriptorSetHostMapping{ descriptorSetHostMapping_ } { } @@ -61820,10 +62553,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedComputePipelines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedComputeCaptureReplay_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceGeneratedCompute( deviceGeneratedCompute_ ) - , deviceGeneratedComputePipelines( deviceGeneratedComputePipelines_ ) - , deviceGeneratedComputeCaptureReplay( deviceGeneratedComputeCaptureReplay_ ) + : pNext{ pNext_ } + , deviceGeneratedCompute{ deviceGeneratedCompute_ } + , deviceGeneratedComputePipelines{ deviceGeneratedComputePipelines_ } + , 
deviceGeneratedComputeCaptureReplay{ deviceGeneratedComputeCaptureReplay_ } { } @@ -61945,8 +62678,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceGeneratedCommandsFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedCommands_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceGeneratedCommands( deviceGeneratedCommands_ ) + : pNext{ pNext_ } + , deviceGeneratedCommands{ deviceGeneratedCommands_ } { } @@ -62052,16 +62785,16 @@ namespace VULKAN_HPP_NAMESPACE uint32_t minSequencesIndexBufferOffsetAlignment_ = {}, uint32_t minIndirectCommandsBufferOffsetAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxGraphicsShaderGroupCount( maxGraphicsShaderGroupCount_ ) - , maxIndirectSequenceCount( maxIndirectSequenceCount_ ) - , maxIndirectCommandsTokenCount( maxIndirectCommandsTokenCount_ ) - , maxIndirectCommandsStreamCount( maxIndirectCommandsStreamCount_ ) - , maxIndirectCommandsTokenOffset( maxIndirectCommandsTokenOffset_ ) - , maxIndirectCommandsStreamStride( maxIndirectCommandsStreamStride_ ) - , minSequencesCountBufferOffsetAlignment( minSequencesCountBufferOffsetAlignment_ ) - , minSequencesIndexBufferOffsetAlignment( minSequencesIndexBufferOffsetAlignment_ ) - , minIndirectCommandsBufferOffsetAlignment( minIndirectCommandsBufferOffsetAlignment_ ) + : pNext{ pNext_ } + , maxGraphicsShaderGroupCount{ maxGraphicsShaderGroupCount_ } + , maxIndirectSequenceCount{ maxIndirectSequenceCount_ } + , maxIndirectCommandsTokenCount{ maxIndirectCommandsTokenCount_ } + , maxIndirectCommandsStreamCount{ maxIndirectCommandsStreamCount_ } + , maxIndirectCommandsTokenOffset{ maxIndirectCommandsTokenOffset_ } + , maxIndirectCommandsStreamStride{ maxIndirectCommandsStreamStride_ } + , minSequencesCountBufferOffsetAlignment{ minSequencesCountBufferOffsetAlignment_ } + , minSequencesIndexBufferOffsetAlignment{ minSequencesIndexBufferOffsetAlignment_ } + , minIndirectCommandsBufferOffsetAlignment{ minIndirectCommandsBufferOffsetAlignment_ } { } @@ -62180,8 +62913,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceMemoryReportFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceMemoryReport( deviceMemoryReport_ ) + : pNext{ pNext_ } + , deviceMemoryReport{ deviceMemoryReport_ } { } @@ -62278,8 +63011,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDiagnosticsConfigFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 diagnosticsConfig_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , diagnosticsConfig( diagnosticsConfig_ ) + : pNext{ pNext_ } + , diagnosticsConfig{ diagnosticsConfig_ } { } @@ -62375,8 +63108,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDiscardRectanglePropertiesEXT( uint32_t maxDiscardRectangles_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxDiscardRectangles( maxDiscardRectangles_ ) + : pNext{ pNext_ } + , maxDiscardRectangles{ maxDiscardRectangles_ } { } @@ -62459,8 +63192,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDisplacementMicromapFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 displacementMicromap_ = {}, void * pNext_ 
= nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , displacementMicromap( displacementMicromap_ ) + : pNext{ pNext_ } + , displacementMicromap{ displacementMicromap_ } { } @@ -62559,8 +63292,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDisplacementMicromapPropertiesNV( uint32_t maxDisplacementMicromapSubdivisionLevel_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxDisplacementMicromapSubdivisionLevel( maxDisplacementMicromapSubdivisionLevel_ ) + : pNext{ pNext_ } + , maxDisplacementMicromapSubdivisionLevel{ maxDisplacementMicromapSubdivisionLevel_ } { } @@ -62647,11 +63380,11 @@ namespace VULKAN_HPP_NAMESPACE std::array<char, VK_MAX_DRIVER_INFO_SIZE> const & driverInfo_ = {}, VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , driverID( driverID_ ) - , driverName( driverName_ ) - , driverInfo( driverInfo_ ) - , conformanceVersion( conformanceVersion_ ) + : pNext{ pNext_ } + , driverID{ driverID_ } + , driverName{ driverName_ } + , driverInfo{ driverInfo_ } + , conformanceVersion{ conformanceVersion_ } { } @@ -62662,30 +63395,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceDriverProperties( VULKAN_HPP_NAMESPACE::DriverId driverID_, - std::string const & driverName_, - std::string const & driverInfo_ = {}, - VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ), driverID( driverID_ ), conformanceVersion( conformanceVersion_ ) - { - VULKAN_HPP_ASSERT( driverName_.size() < VK_MAX_DRIVER_NAME_SIZE ); -# if defined( WIN32 ) - strncpy_s( driverName, VK_MAX_DRIVER_NAME_SIZE, driverName_.data(), driverName_.size() ); -# else - strncpy( driverName, driverName_.data(), std::min<size_t>( VK_MAX_DRIVER_NAME_SIZE, driverName_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( driverInfo_.size() < VK_MAX_DRIVER_INFO_SIZE ); -# if defined( WIN32 ) - strncpy_s( driverInfo, VK_MAX_DRIVER_INFO_SIZE, driverInfo_.data(), driverInfo_.size() ); -# else - strncpy( driverInfo, driverInfo_.data(), std::min<size_t>( VK_MAX_DRIVER_INFO_SIZE, driverInfo_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PhysicalDeviceDriverProperties & operator=( PhysicalDeviceDriverProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -62785,13 +63494,13 @@ namespace VULKAN_HPP_NAMESPACE int64_t renderMajor_ = {}, int64_t renderMinor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hasPrimary( hasPrimary_ ) - , hasRender( hasRender_ ) - , primaryMajor( primaryMajor_ ) - , primaryMinor( primaryMinor_ ) - , renderMajor( renderMajor_ ) - , renderMinor( renderMinor_ ) + : pNext{ pNext_ } + , hasPrimary{ hasPrimary_ } + , hasRender{ hasRender_ } + , primaryMajor{ primaryMajor_ } + , primaryMinor{ primaryMinor_ } + , renderMajor{ renderMajor_ } + , renderMinor{ renderMinor_ } { } @@ -62887,8 +63596,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDynamicRenderingFeatures( VULKAN_HPP_NAMESPACE::Bool32 dynamicRendering_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dynamicRendering( dynamicRendering_ ) + : pNext{ pNext_ } + , dynamicRendering{ dynamicRendering_ } { } @@ -62986,8 +63695,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
VULKAN_HPP_CONSTEXPR PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dynamicRenderingLocalRead( dynamicRenderingLocalRead_ ) + : pNext{ pNext_ } + , dynamicRenderingLocalRead{ dynamicRenderingLocalRead_ } { } @@ -63086,8 +63795,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingUnusedAttachments_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dynamicRenderingUnusedAttachments( dynamicRenderingUnusedAttachments_ ) + : pNext{ pNext_ } + , dynamicRenderingUnusedAttachments{ dynamicRenderingUnusedAttachments_ } { } @@ -63188,8 +63897,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExclusiveScissorFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 exclusiveScissor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , exclusiveScissor( exclusiveScissor_ ) + : pNext{ pNext_ } + , exclusiveScissor{ exclusiveScissor_ } { } @@ -63287,10 +63996,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState2LogicOp_ = {}, VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState2PatchControlPoints_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , extendedDynamicState2( extendedDynamicState2_ ) - , extendedDynamicState2LogicOp( extendedDynamicState2LogicOp_ ) - , extendedDynamicState2PatchControlPoints( extendedDynamicState2PatchControlPoints_ ) + : pNext{ pNext_ } + , extendedDynamicState2{ extendedDynamicState2_ } + , extendedDynamicState2LogicOp{ extendedDynamicState2LogicOp_ } + , extendedDynamicState2PatchControlPoints{ extendedDynamicState2PatchControlPoints_ } { } @@ -63441,38 +64150,38 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState3RepresentativeFragmentTestEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState3ShadingRateImageEnable_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , extendedDynamicState3TessellationDomainOrigin( extendedDynamicState3TessellationDomainOrigin_ ) - , extendedDynamicState3DepthClampEnable( extendedDynamicState3DepthClampEnable_ ) - , extendedDynamicState3PolygonMode( extendedDynamicState3PolygonMode_ ) - , extendedDynamicState3RasterizationSamples( extendedDynamicState3RasterizationSamples_ ) - , extendedDynamicState3SampleMask( extendedDynamicState3SampleMask_ ) - , extendedDynamicState3AlphaToCoverageEnable( extendedDynamicState3AlphaToCoverageEnable_ ) - , extendedDynamicState3AlphaToOneEnable( extendedDynamicState3AlphaToOneEnable_ ) - , extendedDynamicState3LogicOpEnable( extendedDynamicState3LogicOpEnable_ ) - , extendedDynamicState3ColorBlendEnable( extendedDynamicState3ColorBlendEnable_ ) - , extendedDynamicState3ColorBlendEquation( extendedDynamicState3ColorBlendEquation_ ) - , extendedDynamicState3ColorWriteMask( extendedDynamicState3ColorWriteMask_ ) - , extendedDynamicState3RasterizationStream( extendedDynamicState3RasterizationStream_ ) - , extendedDynamicState3ConservativeRasterizationMode( extendedDynamicState3ConservativeRasterizationMode_ ) - , extendedDynamicState3ExtraPrimitiveOverestimationSize( extendedDynamicState3ExtraPrimitiveOverestimationSize_ ) - , extendedDynamicState3DepthClipEnable( 
extendedDynamicState3DepthClipEnable_ ) - , extendedDynamicState3SampleLocationsEnable( extendedDynamicState3SampleLocationsEnable_ ) - , extendedDynamicState3ColorBlendAdvanced( extendedDynamicState3ColorBlendAdvanced_ ) - , extendedDynamicState3ProvokingVertexMode( extendedDynamicState3ProvokingVertexMode_ ) - , extendedDynamicState3LineRasterizationMode( extendedDynamicState3LineRasterizationMode_ ) - , extendedDynamicState3LineStippleEnable( extendedDynamicState3LineStippleEnable_ ) - , extendedDynamicState3DepthClipNegativeOneToOne( extendedDynamicState3DepthClipNegativeOneToOne_ ) - , extendedDynamicState3ViewportWScalingEnable( extendedDynamicState3ViewportWScalingEnable_ ) - , extendedDynamicState3ViewportSwizzle( extendedDynamicState3ViewportSwizzle_ ) - , extendedDynamicState3CoverageToColorEnable( extendedDynamicState3CoverageToColorEnable_ ) - , extendedDynamicState3CoverageToColorLocation( extendedDynamicState3CoverageToColorLocation_ ) - , extendedDynamicState3CoverageModulationMode( extendedDynamicState3CoverageModulationMode_ ) - , extendedDynamicState3CoverageModulationTableEnable( extendedDynamicState3CoverageModulationTableEnable_ ) - , extendedDynamicState3CoverageModulationTable( extendedDynamicState3CoverageModulationTable_ ) - , extendedDynamicState3CoverageReductionMode( extendedDynamicState3CoverageReductionMode_ ) - , extendedDynamicState3RepresentativeFragmentTestEnable( extendedDynamicState3RepresentativeFragmentTestEnable_ ) - , extendedDynamicState3ShadingRateImageEnable( extendedDynamicState3ShadingRateImageEnable_ ) + : pNext{ pNext_ } + , extendedDynamicState3TessellationDomainOrigin{ extendedDynamicState3TessellationDomainOrigin_ } + , extendedDynamicState3DepthClampEnable{ extendedDynamicState3DepthClampEnable_ } + , extendedDynamicState3PolygonMode{ extendedDynamicState3PolygonMode_ } + , extendedDynamicState3RasterizationSamples{ extendedDynamicState3RasterizationSamples_ } + , extendedDynamicState3SampleMask{ extendedDynamicState3SampleMask_ } + , extendedDynamicState3AlphaToCoverageEnable{ extendedDynamicState3AlphaToCoverageEnable_ } + , extendedDynamicState3AlphaToOneEnable{ extendedDynamicState3AlphaToOneEnable_ } + , extendedDynamicState3LogicOpEnable{ extendedDynamicState3LogicOpEnable_ } + , extendedDynamicState3ColorBlendEnable{ extendedDynamicState3ColorBlendEnable_ } + , extendedDynamicState3ColorBlendEquation{ extendedDynamicState3ColorBlendEquation_ } + , extendedDynamicState3ColorWriteMask{ extendedDynamicState3ColorWriteMask_ } + , extendedDynamicState3RasterizationStream{ extendedDynamicState3RasterizationStream_ } + , extendedDynamicState3ConservativeRasterizationMode{ extendedDynamicState3ConservativeRasterizationMode_ } + , extendedDynamicState3ExtraPrimitiveOverestimationSize{ extendedDynamicState3ExtraPrimitiveOverestimationSize_ } + , extendedDynamicState3DepthClipEnable{ extendedDynamicState3DepthClipEnable_ } + , extendedDynamicState3SampleLocationsEnable{ extendedDynamicState3SampleLocationsEnable_ } + , extendedDynamicState3ColorBlendAdvanced{ extendedDynamicState3ColorBlendAdvanced_ } + , extendedDynamicState3ProvokingVertexMode{ extendedDynamicState3ProvokingVertexMode_ } + , extendedDynamicState3LineRasterizationMode{ extendedDynamicState3LineRasterizationMode_ } + , extendedDynamicState3LineStippleEnable{ extendedDynamicState3LineStippleEnable_ } + , extendedDynamicState3DepthClipNegativeOneToOne{ extendedDynamicState3DepthClipNegativeOneToOne_ } + , extendedDynamicState3ViewportWScalingEnable{ 
extendedDynamicState3ViewportWScalingEnable_ } + , extendedDynamicState3ViewportSwizzle{ extendedDynamicState3ViewportSwizzle_ } + , extendedDynamicState3CoverageToColorEnable{ extendedDynamicState3CoverageToColorEnable_ } + , extendedDynamicState3CoverageToColorLocation{ extendedDynamicState3CoverageToColorLocation_ } + , extendedDynamicState3CoverageModulationMode{ extendedDynamicState3CoverageModulationMode_ } + , extendedDynamicState3CoverageModulationTableEnable{ extendedDynamicState3CoverageModulationTableEnable_ } + , extendedDynamicState3CoverageModulationTable{ extendedDynamicState3CoverageModulationTable_ } + , extendedDynamicState3CoverageReductionMode{ extendedDynamicState3CoverageReductionMode_ } + , extendedDynamicState3RepresentativeFragmentTestEnable{ extendedDynamicState3RepresentativeFragmentTestEnable_ } + , extendedDynamicState3ShadingRateImageEnable{ extendedDynamicState3ShadingRateImageEnable_ } { } @@ -63905,8 +64614,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExtendedDynamicState3PropertiesEXT( VULKAN_HPP_NAMESPACE::Bool32 dynamicPrimitiveTopologyUnrestricted_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dynamicPrimitiveTopologyUnrestricted( dynamicPrimitiveTopologyUnrestricted_ ) + : pNext{ pNext_ } + , dynamicPrimitiveTopologyUnrestricted{ dynamicPrimitiveTopologyUnrestricted_ } { } @@ -64004,8 +64713,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExtendedDynamicStateFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , extendedDynamicState( extendedDynamicState_ ) + : pNext{ pNext_ } + , extendedDynamicState{ extendedDynamicState_ } { } @@ -64103,8 +64812,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExtendedSparseAddressSpaceFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 extendedSparseAddressSpace_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , extendedSparseAddressSpace( extendedSparseAddressSpace_ ) + : pNext{ pNext_ } + , extendedSparseAddressSpace{ extendedSparseAddressSpace_ } { } @@ -64205,10 +64914,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageUsageFlags extendedSparseImageUsageFlags_ = {}, VULKAN_HPP_NAMESPACE::BufferUsageFlags extendedSparseBufferUsageFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , extendedSparseAddressSpaceSize( extendedSparseAddressSpaceSize_ ) - , extendedSparseImageUsageFlags( extendedSparseImageUsageFlags_ ) - , extendedSparseBufferUsageFlags( extendedSparseBufferUsageFlags_ ) + : pNext{ pNext_ } + , extendedSparseAddressSpaceSize{ extendedSparseAddressSpaceSize_ } + , extendedSparseImageUsageFlags{ extendedSparseImageUsageFlags_ } + , extendedSparseBufferUsageFlags{ extendedSparseBufferUsageFlags_ } { } @@ -64302,10 +65011,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , usage( usage_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , usage{ usage_ } + , handleType{ handleType_ } { } @@ -64423,8 +65132,8 @@ namespace 
VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalFenceInfo( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } { } @@ -64524,8 +65233,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalFormatResolveFeaturesANDROID( VULKAN_HPP_NAMESPACE::Bool32 externalFormatResolve_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalFormatResolve( externalFormatResolve_ ) + : pNext{ pNext_ } + , externalFormatResolve{ externalFormatResolve_ } { } @@ -64629,10 +65338,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ChromaLocation externalFormatResolveChromaOffsetX_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation externalFormatResolveChromaOffsetY_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , nullColorAttachmentWithExternalFormatResolve( nullColorAttachmentWithExternalFormatResolve_ ) - , externalFormatResolveChromaOffsetX( externalFormatResolveChromaOffsetX_ ) - , externalFormatResolveChromaOffsetY( externalFormatResolveChromaOffsetY_ ) + : pNext{ pNext_ } + , nullColorAttachmentWithExternalFormatResolve{ nullColorAttachmentWithExternalFormatResolve_ } + , externalFormatResolveChromaOffsetX{ externalFormatResolveChromaOffsetX_ } + , externalFormatResolveChromaOffsetY{ externalFormatResolveChromaOffsetY_ } { } @@ -64727,8 +65436,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalImageFormatInfo( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } { } @@ -64827,8 +65536,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalMemoryHostPropertiesEXT( VULKAN_HPP_NAMESPACE::DeviceSize minImportedHostPointerAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minImportedHostPointerAlignment( minImportedHostPointerAlignment_ ) + : pNext{ pNext_ } + , minImportedHostPointerAlignment{ minImportedHostPointerAlignment_ } { } @@ -64911,8 +65620,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalMemoryRDMAFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 externalMemoryRDMA_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , externalMemoryRDMA( externalMemoryRDMA_ ) + : pNext{ pNext_ } + , externalMemoryRDMA{ externalMemoryRDMA_ } { } @@ -65010,8 +65719,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalMemoryScreenBufferFeaturesQNX( VULKAN_HPP_NAMESPACE::Bool32 screenBufferImport_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , screenBufferImport( screenBufferImport_ ) + : pNext{ pNext_ } + , screenBufferImport{ screenBufferImport_ } { } @@ -65112,8 +65821,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalSemaphoreInfo( 
VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , handleType{ handleType_ } { } @@ -65213,9 +65922,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceFaultFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 deviceFault_ = {}, VULKAN_HPP_NAMESPACE::Bool32 deviceFaultVendorBinary_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceFault( deviceFault_ ) - , deviceFaultVendorBinary( deviceFaultVendorBinary_ ) + : pNext{ pNext_ } + , deviceFault{ deviceFault_ } + , deviceFaultVendorBinary{ deviceFaultVendorBinary_ } { } @@ -65319,8 +66028,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , features( features_ ) + : pNext{ pNext_ } + , features{ features_ } { } @@ -65435,24 +66144,24 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat64_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , denormBehaviorIndependence( denormBehaviorIndependence_ ) - , roundingModeIndependence( roundingModeIndependence_ ) - , shaderSignedZeroInfNanPreserveFloat16( shaderSignedZeroInfNanPreserveFloat16_ ) - , shaderSignedZeroInfNanPreserveFloat32( shaderSignedZeroInfNanPreserveFloat32_ ) - , shaderSignedZeroInfNanPreserveFloat64( shaderSignedZeroInfNanPreserveFloat64_ ) - , shaderDenormPreserveFloat16( shaderDenormPreserveFloat16_ ) - , shaderDenormPreserveFloat32( shaderDenormPreserveFloat32_ ) - , shaderDenormPreserveFloat64( shaderDenormPreserveFloat64_ ) - , shaderDenormFlushToZeroFloat16( shaderDenormFlushToZeroFloat16_ ) - , shaderDenormFlushToZeroFloat32( shaderDenormFlushToZeroFloat32_ ) - , shaderDenormFlushToZeroFloat64( shaderDenormFlushToZeroFloat64_ ) - , shaderRoundingModeRTEFloat16( shaderRoundingModeRTEFloat16_ ) - , shaderRoundingModeRTEFloat32( shaderRoundingModeRTEFloat32_ ) - , shaderRoundingModeRTEFloat64( shaderRoundingModeRTEFloat64_ ) - , shaderRoundingModeRTZFloat16( shaderRoundingModeRTZFloat16_ ) - , shaderRoundingModeRTZFloat32( shaderRoundingModeRTZFloat32_ ) - , shaderRoundingModeRTZFloat64( shaderRoundingModeRTZFloat64_ ) + : pNext{ pNext_ } + , denormBehaviorIndependence{ denormBehaviorIndependence_ } + , roundingModeIndependence{ roundingModeIndependence_ } + , shaderSignedZeroInfNanPreserveFloat16{ shaderSignedZeroInfNanPreserveFloat16_ } + , shaderSignedZeroInfNanPreserveFloat32{ shaderSignedZeroInfNanPreserveFloat32_ } + , shaderSignedZeroInfNanPreserveFloat64{ shaderSignedZeroInfNanPreserveFloat64_ } + , shaderDenormPreserveFloat16{ shaderDenormPreserveFloat16_ } + , shaderDenormPreserveFloat32{ shaderDenormPreserveFloat32_ } + , shaderDenormPreserveFloat64{ shaderDenormPreserveFloat64_ } + , shaderDenormFlushToZeroFloat16{ shaderDenormFlushToZeroFloat16_ } + , shaderDenormFlushToZeroFloat32{ shaderDenormFlushToZeroFloat32_ } + , shaderDenormFlushToZeroFloat64{ shaderDenormFlushToZeroFloat64_ } + , shaderRoundingModeRTEFloat16{ shaderRoundingModeRTEFloat16_ } + , shaderRoundingModeRTEFloat32{ shaderRoundingModeRTEFloat32_ } + , shaderRoundingModeRTEFloat64{ shaderRoundingModeRTEFloat64_ } + , 
shaderRoundingModeRTZFloat16{ shaderRoundingModeRTZFloat16_ } + , shaderRoundingModeRTZFloat32{ shaderRoundingModeRTZFloat32_ } + , shaderRoundingModeRTZFloat64{ shaderRoundingModeRTZFloat64_ } { } @@ -65599,8 +66308,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMap2FeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDeferred_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityMapDeferred( fragmentDensityMapDeferred_ ) + : pNext{ pNext_ } + , fragmentDensityMapDeferred{ fragmentDensityMapDeferred_ } { } @@ -65700,11 +66409,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxSubsampledArrayLayers_ = {}, uint32_t maxDescriptorSetSubsampledSamplers_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subsampledLoads( subsampledLoads_ ) - , subsampledCoarseReconstructionEarlyAccess( subsampledCoarseReconstructionEarlyAccess_ ) - , maxSubsampledArrayLayers( maxSubsampledArrayLayers_ ) - , maxDescriptorSetSubsampledSamplers( maxDescriptorSetSubsampledSamplers_ ) + : pNext{ pNext_ } + , subsampledLoads{ subsampledLoads_ } + , subsampledCoarseReconstructionEarlyAccess{ subsampledCoarseReconstructionEarlyAccess_ } + , maxSubsampledArrayLayers{ maxSubsampledArrayLayers_ } + , maxDescriptorSetSubsampledSamplers{ maxDescriptorSetSubsampledSamplers_ } { } @@ -65799,10 +66508,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDynamic_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapNonSubsampledImages_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityMap( fragmentDensityMap_ ) - , fragmentDensityMapDynamic( fragmentDensityMapDynamic_ ) - , fragmentDensityMapNonSubsampledImages( fragmentDensityMapNonSubsampledImages_ ) + : pNext{ pNext_ } + , fragmentDensityMap{ fragmentDensityMap_ } + , fragmentDensityMapDynamic{ fragmentDensityMapDynamic_ } + , fragmentDensityMapNonSubsampledImages{ fragmentDensityMapNonSubsampledImages_ } { } @@ -65921,8 +66630,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapOffset_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityMapOffset( fragmentDensityMapOffset_ ) + : pNext{ pNext_ } + , fragmentDensityMapOffset{ fragmentDensityMapOffset_ } { } @@ -66021,8 +66730,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM( VULKAN_HPP_NAMESPACE::Extent2D fragmentDensityOffsetGranularity_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityOffsetGranularity( fragmentDensityOffsetGranularity_ ) + : pNext{ pNext_ } + , fragmentDensityOffsetGranularity{ fragmentDensityOffsetGranularity_ } { } @@ -66108,10 +66817,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D maxFragmentDensityTexelSize_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityInvocations_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minFragmentDensityTexelSize( minFragmentDensityTexelSize_ ) - , maxFragmentDensityTexelSize( maxFragmentDensityTexelSize_ ) - , fragmentDensityInvocations( fragmentDensityInvocations_ ) + : pNext{ pNext_ } + , minFragmentDensityTexelSize{ minFragmentDensityTexelSize_ } + , 
maxFragmentDensityTexelSize{ maxFragmentDensityTexelSize_ } + , fragmentDensityInvocations{ fragmentDensityInvocations_ } { } @@ -66201,8 +66910,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderBarycentricFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderBarycentric_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentShaderBarycentric( fragmentShaderBarycentric_ ) + : pNext{ pNext_ } + , fragmentShaderBarycentric{ fragmentShaderBarycentric_ } { } @@ -66304,8 +67013,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderBarycentricPropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 triStripVertexOrderIndependentOfProvokingVertex_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , triStripVertexOrderIndependentOfProvokingVertex( triStripVertexOrderIndependentOfProvokingVertex_ ) + : pNext{ pNext_ } + , triStripVertexOrderIndependentOfProvokingVertex{ triStripVertexOrderIndependentOfProvokingVertex_ } { } @@ -66392,10 +67101,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderPixelInterlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderShadingRateInterlock_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentShaderSampleInterlock( fragmentShaderSampleInterlock_ ) - , fragmentShaderPixelInterlock( fragmentShaderPixelInterlock_ ) - , fragmentShaderShadingRateInterlock( fragmentShaderShadingRateInterlock_ ) + : pNext{ pNext_ } + , fragmentShaderSampleInterlock{ fragmentShaderSampleInterlock_ } + , fragmentShaderPixelInterlock{ fragmentShaderPixelInterlock_ } + , fragmentShaderShadingRateInterlock{ fragmentShaderShadingRateInterlock_ } { } @@ -66517,10 +67226,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates_ = {}, VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentShadingRateEnums( fragmentShadingRateEnums_ ) - , supersampleFragmentShadingRates( supersampleFragmentShadingRates_ ) - , noInvocationFragmentShadingRates( noInvocationFragmentShadingRates_ ) + : pNext{ pNext_ } + , fragmentShadingRateEnums{ fragmentShadingRateEnums_ } + , supersampleFragmentShadingRates{ supersampleFragmentShadingRates_ } + , noInvocationFragmentShadingRates{ noInvocationFragmentShadingRates_ } { } @@ -66641,8 +67350,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsPropertiesNV( VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxFragmentShadingRateInvocationCount( maxFragmentShadingRateInvocationCount_ ) + : pNext{ pNext_ } + , maxFragmentShadingRateInvocationCount{ maxFragmentShadingRateInvocationCount_ } { } @@ -66743,10 +67452,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineFragmentShadingRate( pipelineFragmentShadingRate_ ) - , primitiveFragmentShadingRate( primitiveFragmentShadingRate_ ) - , attachmentFragmentShadingRate( attachmentFragmentShadingRate_ ) + : pNext{ pNext_ } + , pipelineFragmentShadingRate{ pipelineFragmentShadingRate_ } + , 
primitiveFragmentShadingRate{ primitiveFragmentShadingRate_ } + , attachmentFragmentShadingRate{ attachmentFragmentShadingRate_ } { } @@ -66865,9 +67574,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateKHR( VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts_ = {}, VULKAN_HPP_NAMESPACE::Extent2D fragmentSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampleCounts( sampleCounts_ ) - , fragmentSize( fragmentSize_ ) + : pNext{ pNext_ } + , sampleCounts{ sampleCounts_ } + , fragmentSize{ fragmentSize_ } { } @@ -66968,24 +67677,24 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithCustomSampleLocations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateStrictMultiplyCombiner_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minFragmentShadingRateAttachmentTexelSize( minFragmentShadingRateAttachmentTexelSize_ ) - , maxFragmentShadingRateAttachmentTexelSize( maxFragmentShadingRateAttachmentTexelSize_ ) - , maxFragmentShadingRateAttachmentTexelSizeAspectRatio( maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ ) - , primitiveFragmentShadingRateWithMultipleViewports( primitiveFragmentShadingRateWithMultipleViewports_ ) - , layeredShadingRateAttachments( layeredShadingRateAttachments_ ) - , fragmentShadingRateNonTrivialCombinerOps( fragmentShadingRateNonTrivialCombinerOps_ ) - , maxFragmentSize( maxFragmentSize_ ) - , maxFragmentSizeAspectRatio( maxFragmentSizeAspectRatio_ ) - , maxFragmentShadingRateCoverageSamples( maxFragmentShadingRateCoverageSamples_ ) - , maxFragmentShadingRateRasterizationSamples( maxFragmentShadingRateRasterizationSamples_ ) - , fragmentShadingRateWithShaderDepthStencilWrites( fragmentShadingRateWithShaderDepthStencilWrites_ ) - , fragmentShadingRateWithSampleMask( fragmentShadingRateWithSampleMask_ ) - , fragmentShadingRateWithShaderSampleMask( fragmentShadingRateWithShaderSampleMask_ ) - , fragmentShadingRateWithConservativeRasterization( fragmentShadingRateWithConservativeRasterization_ ) - , fragmentShadingRateWithFragmentShaderInterlock( fragmentShadingRateWithFragmentShaderInterlock_ ) - , fragmentShadingRateWithCustomSampleLocations( fragmentShadingRateWithCustomSampleLocations_ ) - , fragmentShadingRateStrictMultiplyCombiner( fragmentShadingRateStrictMultiplyCombiner_ ) + : pNext{ pNext_ } + , minFragmentShadingRateAttachmentTexelSize{ minFragmentShadingRateAttachmentTexelSize_ } + , maxFragmentShadingRateAttachmentTexelSize{ maxFragmentShadingRateAttachmentTexelSize_ } + , maxFragmentShadingRateAttachmentTexelSizeAspectRatio{ maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ } + , primitiveFragmentShadingRateWithMultipleViewports{ primitiveFragmentShadingRateWithMultipleViewports_ } + , layeredShadingRateAttachments{ layeredShadingRateAttachments_ } + , fragmentShadingRateNonTrivialCombinerOps{ fragmentShadingRateNonTrivialCombinerOps_ } + , maxFragmentSize{ maxFragmentSize_ } + , maxFragmentSizeAspectRatio{ maxFragmentSizeAspectRatio_ } + , maxFragmentShadingRateCoverageSamples{ maxFragmentShadingRateCoverageSamples_ } + , maxFragmentShadingRateRasterizationSamples{ maxFragmentShadingRateRasterizationSamples_ } + , fragmentShadingRateWithShaderDepthStencilWrites{ fragmentShadingRateWithShaderDepthStencilWrites_ } + , fragmentShadingRateWithSampleMask{ fragmentShadingRateWithSampleMask_ } + , fragmentShadingRateWithShaderSampleMask{ fragmentShadingRateWithShaderSampleMask_ } + , 
fragmentShadingRateWithConservativeRasterization{ fragmentShadingRateWithConservativeRasterization_ } + , fragmentShadingRateWithFragmentShaderInterlock{ fragmentShadingRateWithFragmentShaderInterlock_ } + , fragmentShadingRateWithCustomSampleLocations{ fragmentShadingRateWithCustomSampleLocations_ } + , fragmentShadingRateStrictMultiplyCombiner{ fragmentShadingRateStrictMultiplyCombiner_ } { } @@ -67135,8 +67844,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceFrameBoundaryFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 frameBoundary_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , frameBoundary( frameBoundary_ ) + : pNext{ pNext_ } + , frameBoundary{ frameBoundary_ } { } @@ -67232,8 +67941,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceGlobalPriorityQueryFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , globalPriorityQuery( globalPriorityQuery_ ) + : pNext{ pNext_ } + , globalPriorityQuery{ globalPriorityQuery_ } { } @@ -67332,8 +68041,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceGraphicsPipelineLibraryFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 graphicsPipelineLibrary_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , graphicsPipelineLibrary( graphicsPipelineLibrary_ ) + : pNext{ pNext_ } + , graphicsPipelineLibrary{ graphicsPipelineLibrary_ } { } @@ -67433,9 +68142,9 @@ namespace VULKAN_HPP_NAMESPACE PhysicalDeviceGraphicsPipelineLibraryPropertiesEXT( VULKAN_HPP_NAMESPACE::Bool32 graphicsPipelineLibraryFastLinking_ = {}, VULKAN_HPP_NAMESPACE::Bool32 graphicsPipelineLibraryIndependentInterpolationDecoration_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , graphicsPipelineLibraryFastLinking( graphicsPipelineLibraryFastLinking_ ) - , graphicsPipelineLibraryIndependentInterpolationDecoration( graphicsPipelineLibraryIndependentInterpolationDecoration_ ) + : pNext{ pNext_ } + , graphicsPipelineLibraryFastLinking{ graphicsPipelineLibraryFastLinking_ } + , graphicsPipelineLibraryIndependentInterpolationDecoration{ graphicsPipelineLibraryIndependentInterpolationDecoration_ } { } @@ -67546,10 +68255,10 @@ namespace VULKAN_HPP_NAMESPACE std::array const & physicalDevices_ = {}, VULKAN_HPP_NAMESPACE::Bool32 subsetAllocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , physicalDeviceCount( physicalDeviceCount_ ) - , physicalDevices( physicalDevices_ ) - , subsetAllocation( subsetAllocation_ ) + : pNext{ pNext_ } + , physicalDeviceCount{ physicalDeviceCount_ } + , physicalDevices{ physicalDevices_ } + , subsetAllocation{ subsetAllocation_ } { } @@ -67560,19 +68269,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceGroupProperties( VULKAN_HPP_NAMESPACE::ArrayProxy const & physicalDevices_, - VULKAN_HPP_NAMESPACE::Bool32 subsetAllocation_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ) - , physicalDeviceCount( std::min( static_cast( physicalDevices_.size() ), VK_MAX_DEVICE_GROUP_SIZE ) ) - , subsetAllocation( subsetAllocation_ ) - { - VULKAN_HPP_ASSERT( physicalDevices_.size() < VK_MAX_DEVICE_GROUP_SIZE ); - memcpy( physicalDevices, physicalDevices_.data(), physicalDeviceCount * sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevice ) ); - } 
-# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PhysicalDeviceGroupProperties & operator=( PhysicalDeviceGroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -67666,8 +68362,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceHostImageCopyFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hostImageCopy( hostImageCopy_ ) + : pNext{ pNext_ } + , hostImageCopy{ hostImageCopy_ } { } @@ -67768,13 +68464,13 @@ namespace VULKAN_HPP_NAMESPACE std::array const & optimalTilingLayoutUUID_ = {}, VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , copySrcLayoutCount( copySrcLayoutCount_ ) - , pCopySrcLayouts( pCopySrcLayouts_ ) - , copyDstLayoutCount( copyDstLayoutCount_ ) - , pCopyDstLayouts( pCopyDstLayouts_ ) - , optimalTilingLayoutUUID( optimalTilingLayoutUUID_ ) - , identicalMemoryTypeRequirements( identicalMemoryTypeRequirements_ ) + : pNext{ pNext_ } + , copySrcLayoutCount{ copySrcLayoutCount_ } + , pCopySrcLayouts{ pCopySrcLayouts_ } + , copyDstLayoutCount{ copyDstLayoutCount_ } + , pCopyDstLayouts{ pCopyDstLayouts_ } + , optimalTilingLayoutUUID{ optimalTilingLayoutUUID_ } + , identicalMemoryTypeRequirements{ identicalMemoryTypeRequirements_ } { } @@ -67955,8 +68651,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceHostQueryResetFeatures( VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hostQueryReset( hostQueryReset_ ) + : pNext{ pNext_ } + , hostQueryReset{ hostQueryReset_ } { } @@ -68058,12 +68754,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t deviceNodeMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 deviceLUIDValid_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceUUID( deviceUUID_ ) - , driverUUID( driverUUID_ ) - , deviceLUID( deviceLUID_ ) - , deviceNodeMask( deviceNodeMask_ ) - , deviceLUIDValid( deviceLUIDValid_ ) + : pNext{ pNext_ } + , deviceUUID{ deviceUUID_ } + , driverUUID{ driverUUID_ } + , deviceLUID{ deviceLUID_ } + , deviceNodeMask{ deviceNodeMask_ } + , deviceLUIDValid{ deviceLUIDValid_ } { } @@ -68159,9 +68855,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceImage2DViewOf3DFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 image2DViewOf3D_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sampler2DViewOf3D_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , image2DViewOf3D( image2DViewOf3D_ ) - , sampler2DViewOf3D( sampler2DViewOf3D_ ) + : pNext{ pNext_ } + , image2DViewOf3D{ image2DViewOf3D_ } + , sampler2DViewOf3D{ sampler2DViewOf3D_ } { } @@ -68255,6 +68951,205 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceImage2DViewOf3DFeaturesEXT; }; + struct PhysicalDeviceImageAlignmentControlFeaturesMESA + { + using NativeType = VkPhysicalDeviceImageAlignmentControlFeaturesMESA; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageAlignmentControlFeaturesMESA; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageAlignmentControlFeaturesMESA( VULKAN_HPP_NAMESPACE::Bool32 imageAlignmentControl_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ 
pNext_ }
+      , imageAlignmentControl{ imageAlignmentControl_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR
+      PhysicalDeviceImageAlignmentControlFeaturesMESA( PhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceImageAlignmentControlFeaturesMESA( VkPhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceImageAlignmentControlFeaturesMESA( *reinterpret_cast<PhysicalDeviceImageAlignmentControlFeaturesMESA const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceImageAlignmentControlFeaturesMESA & operator=( PhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceImageAlignmentControlFeaturesMESA & operator=( VkPhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlFeaturesMESA const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceImageAlignmentControlFeaturesMESA & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceImageAlignmentControlFeaturesMESA &
+      setImageAlignmentControl( VULKAN_HPP_NAMESPACE::Bool32 imageAlignmentControl_ ) VULKAN_HPP_NOEXCEPT
+    {
+      imageAlignmentControl = imageAlignmentControl_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceImageAlignmentControlFeaturesMESA const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceImageAlignmentControlFeaturesMESA *>( this );
+    }
+
+    operator VkPhysicalDeviceImageAlignmentControlFeaturesMESA &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceImageAlignmentControlFeaturesMESA *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+# if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+# else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+# endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, imageAlignmentControl );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceImageAlignmentControlFeaturesMESA const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+# if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+# else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( imageAlignmentControl == rhs.imageAlignmentControl );
+# endif
+    }
+
+    bool operator!=( PhysicalDeviceImageAlignmentControlFeaturesMESA const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType                 = StructureType::ePhysicalDeviceImageAlignmentControlFeaturesMESA;
+    void *                              pNext                 = {};
+    VULKAN_HPP_NAMESPACE::Bool32        imageAlignmentControl = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceImageAlignmentControlFeaturesMESA>
+  {
+    using Type = PhysicalDeviceImageAlignmentControlFeaturesMESA;
+  };
+
+  struct PhysicalDeviceImageAlignmentControlPropertiesMESA
+  {
+    using NativeType = VkPhysicalDeviceImageAlignmentControlPropertiesMESA;
+
+    static const bool                    allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageAlignmentControlPropertiesMESA;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceImageAlignmentControlPropertiesMESA( uint32_t supportedImageAlignmentMask_ = {},
+                                                                            void *   pNext_                      = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , supportedImageAlignmentMask{ supportedImageAlignmentMask_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR
+      PhysicalDeviceImageAlignmentControlPropertiesMESA( PhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceImageAlignmentControlPropertiesMESA( VkPhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceImageAlignmentControlPropertiesMESA( *reinterpret_cast<PhysicalDeviceImageAlignmentControlPropertiesMESA const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceImageAlignmentControlPropertiesMESA &
+      operator=( PhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceImageAlignmentControlPropertiesMESA & operator=( VkPhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceImageAlignmentControlPropertiesMESA const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceImageAlignmentControlPropertiesMESA & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceImageAlignmentControlPropertiesMESA &
+      setSupportedImageAlignmentMask( uint32_t supportedImageAlignmentMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      supportedImageAlignmentMask = supportedImageAlignmentMask_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceImageAlignmentControlPropertiesMESA const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceImageAlignmentControlPropertiesMESA *>( this );
+    }
+
+    operator VkPhysicalDeviceImageAlignmentControlPropertiesMESA &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceImageAlignmentControlPropertiesMESA *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+# if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+# else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, uint32_t const &>
+# endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, supportedImageAlignmentMask );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceImageAlignmentControlPropertiesMESA const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+# if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+# else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( supportedImageAlignmentMask == rhs.supportedImageAlignmentMask );
+# endif
+    }
+
+    bool operator!=( PhysicalDeviceImageAlignmentControlPropertiesMESA const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType                       = StructureType::ePhysicalDeviceImageAlignmentControlPropertiesMESA;
+    void *                              pNext                       = {};
+    uint32_t                            supportedImageAlignmentMask = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceImageAlignmentControlPropertiesMESA>
+  {
+    using Type = PhysicalDeviceImageAlignmentControlPropertiesMESA;
+  };
+
   struct PhysicalDeviceImageCompressionControlFeaturesEXT
   {
     using NativeType = VkPhysicalDeviceImageCompressionControlFeaturesEXT;
@@ -68265,8 +69160,8 @@ namespace VULKAN_HPP_NAMESPACE
 
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceImageCompressionControlFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 imageCompressionControl_ = {},
                                                                            void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , imageCompressionControl( imageCompressionControl_ )
+      : pNext{ pNext_ }
+      , imageCompressionControl{ imageCompressionControl_ }
     {
     }
@@ -68364,8 +69259,8 @@ namespace VULKAN_HPP_NAMESPACE
 
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceImageCompressionControlSwapchainFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 imageCompressionControlSwapchain_
= {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageCompressionControlSwapchain( imageCompressionControlSwapchain_ ) + : pNext{ pNext_ } + , imageCompressionControlSwapchain{ imageCompressionControlSwapchain_ } { } @@ -68470,11 +69365,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queueFamilyIndexCount_ = {}, const uint32_t * pQueueFamilyIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , drmFormatModifier( drmFormatModifier_ ) - , sharingMode( sharingMode_ ) - , queueFamilyIndexCount( queueFamilyIndexCount_ ) - , pQueueFamilyIndices( pQueueFamilyIndices_ ) + : pNext{ pNext_ } + , drmFormatModifier{ drmFormatModifier_ } + , sharingMode{ sharingMode_ } + , queueFamilyIndexCount{ queueFamilyIndexCount_ } + , pQueueFamilyIndices{ pQueueFamilyIndices_ } { } @@ -68625,12 +69520,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , type( type_ ) - , tiling( tiling_ ) - , usage( usage_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , format{ format_ } + , type{ type_ } + , tiling{ tiling_ } + , usage{ usage_ } + , flags{ flags_ } { } @@ -68763,8 +69658,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImageProcessing2FeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 textureBlockMatch2_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , textureBlockMatch2( textureBlockMatch2_ ) + : pNext{ pNext_ } + , textureBlockMatch2{ textureBlockMatch2_ } { } @@ -68861,8 +69756,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImageProcessing2PropertiesQCOM( VULKAN_HPP_NAMESPACE::Extent2D maxBlockMatchWindow_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxBlockMatchWindow( maxBlockMatchWindow_ ) + : pNext{ pNext_ } + , maxBlockMatchWindow{ maxBlockMatchWindow_ } { } @@ -68946,10 +69841,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 textureBoxFilter_ = {}, VULKAN_HPP_NAMESPACE::Bool32 textureBlockMatch_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , textureSampleWeighted( textureSampleWeighted_ ) - , textureBoxFilter( textureBoxFilter_ ) - , textureBlockMatch( textureBlockMatch_ ) + : pNext{ pNext_ } + , textureSampleWeighted{ textureSampleWeighted_ } + , textureBoxFilter{ textureBoxFilter_ } + , textureBlockMatch{ textureBlockMatch_ } { } @@ -69070,11 +69965,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D maxBlockMatchRegion_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxBoxFilterBlockSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxWeightFilterPhases( maxWeightFilterPhases_ ) - , maxWeightFilterDimension( maxWeightFilterDimension_ ) - , maxBlockMatchRegion( maxBlockMatchRegion_ ) - , maxBoxFilterBlockSize( maxBoxFilterBlockSize_ ) + : pNext{ pNext_ } + , maxWeightFilterPhases{ maxWeightFilterPhases_ } + , maxWeightFilterDimension{ maxWeightFilterDimension_ } + , maxBlockMatchRegion{ maxBlockMatchRegion_ } + , maxBoxFilterBlockSize{ maxBoxFilterBlockSize_ } { } @@ -69166,8 +70061,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImageRobustnessFeatures( VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess_ = {}, void * 
pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , robustImageAccess( robustImageAccess_ ) + : pNext{ pNext_ } + , robustImageAccess{ robustImageAccess_ } { } @@ -69265,8 +70160,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImageSlicedViewOf3DFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 imageSlicedViewOf3D_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageSlicedViewOf3D( imageSlicedViewOf3D_ ) + : pNext{ pNext_ } + , imageSlicedViewOf3D{ imageSlicedViewOf3D_ } { } @@ -69364,8 +70259,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceImageViewImageFormatInfoEXT( VULKAN_HPP_NAMESPACE::ImageViewType imageViewType_ = VULKAN_HPP_NAMESPACE::ImageViewType::e1D, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageViewType( imageViewType_ ) + : pNext{ pNext_ } + , imageViewType{ imageViewType_ } { } @@ -69461,8 +70356,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImageViewMinLodFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 minLod_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minLod( minLod_ ) + : pNext{ pNext_ } + , minLod{ minLod_ } { } @@ -69558,8 +70453,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceImagelessFramebufferFeatures( VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imagelessFramebuffer( imagelessFramebuffer_ ) + : pNext{ pNext_ } + , imagelessFramebuffer{ imagelessFramebuffer_ } { } @@ -69658,8 +70553,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceIndexTypeUint8FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , indexTypeUint8( indexTypeUint8_ ) + : pNext{ pNext_ } + , indexTypeUint8{ indexTypeUint8_ } { } @@ -69757,8 +70652,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceInheritedViewportScissorFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 inheritedViewportScissor2D_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , inheritedViewportScissor2D( inheritedViewportScissor2D_ ) + : pNext{ pNext_ } + , inheritedViewportScissor2D{ inheritedViewportScissor2D_ } { } @@ -69857,9 +70752,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceInlineUniformBlockFeatures( VULKAN_HPP_NAMESPACE::Bool32 inlineUniformBlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingInlineUniformBlockUpdateAfterBind_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , inlineUniformBlock( inlineUniformBlock_ ) - , descriptorBindingInlineUniformBlockUpdateAfterBind( descriptorBindingInlineUniformBlockUpdateAfterBind_ ) + : pNext{ pNext_ } + , inlineUniformBlock{ inlineUniformBlock_ } + , descriptorBindingInlineUniformBlockUpdateAfterBind{ descriptorBindingInlineUniformBlockUpdateAfterBind_ } { } @@ -69971,12 +70866,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxDescriptorSetInlineUniformBlocks_ = {}, uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxInlineUniformBlockSize( maxInlineUniformBlockSize_ ) - , 
maxPerStageDescriptorInlineUniformBlocks( maxPerStageDescriptorInlineUniformBlocks_ ) - , maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks( maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ ) - , maxDescriptorSetInlineUniformBlocks( maxDescriptorSetInlineUniformBlocks_ ) - , maxDescriptorSetUpdateAfterBindInlineUniformBlocks( maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ ) + : pNext{ pNext_ } + , maxInlineUniformBlockSize{ maxInlineUniformBlockSize_ } + , maxPerStageDescriptorInlineUniformBlocks{ maxPerStageDescriptorInlineUniformBlocks_ } + , maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks{ maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ } + , maxDescriptorSetInlineUniformBlocks{ maxDescriptorSetInlineUniformBlocks_ } + , maxDescriptorSetUpdateAfterBindInlineUniformBlocks{ maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ } { } @@ -70080,8 +70975,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceInvocationMaskFeaturesHUAWEI( VULKAN_HPP_NAMESPACE::Bool32 invocationMask_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , invocationMask( invocationMask_ ) + : pNext{ pNext_ } + , invocationMask{ invocationMask_ } { } @@ -70167,185 +71062,231 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceInvocationMaskFeaturesHUAWEI; }; - struct PhysicalDeviceLayeredDriverPropertiesMSFT + struct PhysicalDeviceLayeredApiPropertiesKHR { - using NativeType = VkPhysicalDeviceLayeredDriverPropertiesMSFT; + using NativeType = VkPhysicalDeviceLayeredApiPropertiesKHR; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLayeredApiPropertiesKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceLayeredDriverPropertiesMSFT( - VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT underlyingAPI_ = VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT::eNone, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , underlyingAPI( underlyingAPI_ ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesKHR( + uint32_t vendorID_ = {}, + uint32_t deviceID_ = {}, + VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiKHR layeredAPI_ = VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiKHR::eVulkan, + std::array const & deviceName_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , vendorID{ vendorID_ } + , deviceID{ deviceID_ } + , layeredAPI{ layeredAPI_ } + , deviceName{ deviceName_ } { } - VULKAN_HPP_CONSTEXPR PhysicalDeviceLayeredDriverPropertiesMSFT( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesKHR( PhysicalDeviceLayeredApiPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceLayeredDriverPropertiesMSFT( VkPhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceLayeredDriverPropertiesMSFT( *reinterpret_cast( &rhs ) ) + PhysicalDeviceLayeredApiPropertiesKHR( VkPhysicalDeviceLayeredApiPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceLayeredApiPropertiesKHR( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceLayeredDriverPropertiesMSFT & operator=( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT = 
default; + PhysicalDeviceLayeredApiPropertiesKHR & operator=( PhysicalDeviceLayeredApiPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceLayeredDriverPropertiesMSFT & operator=( VkPhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceLayeredApiPropertiesKHR & operator=( VkPhysicalDeviceLayeredApiPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } - operator VkPhysicalDeviceLayeredDriverPropertiesMSFT const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceLayeredApiPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceLayeredDriverPropertiesMSFT &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceLayeredApiPropertiesKHR &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple + std::tuple const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { - return std::tie( sType, pNext, underlyingAPI ); + return std::tie( sType, pNext, vendorID, deviceID, layeredAPI, deviceName ); } #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceLayeredDriverPropertiesMSFT const & ) const = default; + auto operator<=>( PhysicalDeviceLayeredApiPropertiesKHR const & ) const = default; #else - bool operator==( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceLayeredApiPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); # else - return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( underlyingAPI == rhs.underlyingAPI ); + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( vendorID == rhs.vendorID ) && ( deviceID == rhs.deviceID ) && + ( layeredAPI == rhs.layeredAPI ) && ( deviceName == rhs.deviceName ); # endif } - bool operator!=( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceLayeredApiPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT; - void * pNext = {}; - VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT underlyingAPI = VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT::eNone; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLayeredApiPropertiesKHR; + void * pNext = {}; + uint32_t vendorID = {}; + uint32_t deviceID = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiKHR layeredAPI = VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiKHR::eVulkan; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D deviceName = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceLayeredDriverPropertiesMSFT; + using Type = PhysicalDeviceLayeredApiPropertiesKHR; }; - struct PhysicalDeviceLegacyDitheringFeaturesEXT + struct PhysicalDeviceLayeredApiPropertiesListKHR { - using NativeType = VkPhysicalDeviceLegacyDitheringFeaturesEXT; + using NativeType = VkPhysicalDeviceLayeredApiPropertiesListKHR; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = 
StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLayeredApiPropertiesListKHR; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceLegacyDitheringFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 legacyDithering_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , legacyDithering( legacyDithering_ ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesListKHR( uint32_t layeredApiCount_ = {}, + VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR * pLayeredApis_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , layeredApiCount{ layeredApiCount_ } + , pLayeredApis{ pLayeredApis_ } { } - VULKAN_HPP_CONSTEXPR PhysicalDeviceLegacyDitheringFeaturesEXT( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesListKHR( PhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceLegacyDitheringFeaturesEXT( VkPhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceLegacyDitheringFeaturesEXT( *reinterpret_cast( &rhs ) ) + PhysicalDeviceLayeredApiPropertiesListKHR( VkPhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceLayeredApiPropertiesListKHR( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceLegacyDitheringFeaturesEXT & operator=( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PhysicalDeviceLayeredApiPropertiesListKHR( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & layeredApis_, void * pNext_ = nullptr ) + : pNext( pNext_ ), layeredApiCount( static_cast( layeredApis_.size() ) ), pLayeredApis( layeredApis_.data() ) + { + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + PhysicalDeviceLayeredApiPropertiesListKHR & operator=( PhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceLegacyDitheringFeaturesEXT & operator=( VkPhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceLayeredApiPropertiesListKHR & operator=( VkPhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyDitheringFeaturesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesListKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyDitheringFeaturesEXT & setLegacyDithering( VULKAN_HPP_NAMESPACE::Bool32 legacyDithering_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesListKHR & setLayeredApiCount( uint32_t layeredApiCount_ ) VULKAN_HPP_NOEXCEPT { - legacyDithering = legacyDithering_; + layeredApiCount = layeredApiCount_; return *this; } -#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceLegacyDitheringFeaturesEXT const &() const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiPropertiesListKHR & + setPLayeredApis( VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR * pLayeredApis_ ) 
VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + pLayeredApis = pLayeredApis_; + return *this; } - operator VkPhysicalDeviceLegacyDitheringFeaturesEXT &() VULKAN_HPP_NOEXCEPT +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PhysicalDeviceLayeredApiPropertiesListKHR & setLayeredApis( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & layeredApis_ ) VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + layeredApiCount = static_cast( layeredApis_.size() ); + pLayeredApis = layeredApis_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPhysicalDeviceLayeredApiPropertiesListKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceLayeredApiPropertiesListKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple + std::tuple # endif reflect() const VULKAN_HPP_NOEXCEPT { - return std::tie( sType, pNext, legacyDithering ); + return std::tie( sType, pNext, layeredApiCount, pLayeredApis ); } #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceLegacyDitheringFeaturesEXT const & ) const = default; + auto operator<=>( PhysicalDeviceLayeredApiPropertiesListKHR const & ) const = default; #else - bool operator==( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); # else - return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( legacyDithering == rhs.legacyDithering ); + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( layeredApiCount == rhs.layeredApiCount ) && ( pLayeredApis == rhs.pLayeredApis ); # endif } - bool operator!=( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceLayeredApiPropertiesListKHR const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT; - void * pNext = {}; - VULKAN_HPP_NAMESPACE::Bool32 legacyDithering = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLayeredApiPropertiesListKHR; + void * pNext = {}; + uint32_t layeredApiCount = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiPropertiesKHR * pLayeredApis = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceLegacyDitheringFeaturesEXT; + using Type = PhysicalDeviceLayeredApiPropertiesListKHR; }; struct PhysicalDeviceLimits @@ -70459,112 +71400,112 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyOffsetAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyRowPitchAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize nonCoherentAtomSize_ = {} ) VULKAN_HPP_NOEXCEPT - : maxImageDimension1D( maxImageDimension1D_ ) - , maxImageDimension2D( maxImageDimension2D_ ) - , maxImageDimension3D( maxImageDimension3D_ ) - , maxImageDimensionCube( maxImageDimensionCube_ ) - , maxImageArrayLayers( maxImageArrayLayers_ ) - , maxTexelBufferElements( maxTexelBufferElements_ ) - , maxUniformBufferRange( maxUniformBufferRange_ ) - , maxStorageBufferRange( maxStorageBufferRange_ ) - , maxPushConstantsSize( 
maxPushConstantsSize_ ) - , maxMemoryAllocationCount( maxMemoryAllocationCount_ ) - , maxSamplerAllocationCount( maxSamplerAllocationCount_ ) - , bufferImageGranularity( bufferImageGranularity_ ) - , sparseAddressSpaceSize( sparseAddressSpaceSize_ ) - , maxBoundDescriptorSets( maxBoundDescriptorSets_ ) - , maxPerStageDescriptorSamplers( maxPerStageDescriptorSamplers_ ) - , maxPerStageDescriptorUniformBuffers( maxPerStageDescriptorUniformBuffers_ ) - , maxPerStageDescriptorStorageBuffers( maxPerStageDescriptorStorageBuffers_ ) - , maxPerStageDescriptorSampledImages( maxPerStageDescriptorSampledImages_ ) - , maxPerStageDescriptorStorageImages( maxPerStageDescriptorStorageImages_ ) - , maxPerStageDescriptorInputAttachments( maxPerStageDescriptorInputAttachments_ ) - , maxPerStageResources( maxPerStageResources_ ) - , maxDescriptorSetSamplers( maxDescriptorSetSamplers_ ) - , maxDescriptorSetUniformBuffers( maxDescriptorSetUniformBuffers_ ) - , maxDescriptorSetUniformBuffersDynamic( maxDescriptorSetUniformBuffersDynamic_ ) - , maxDescriptorSetStorageBuffers( maxDescriptorSetStorageBuffers_ ) - , maxDescriptorSetStorageBuffersDynamic( maxDescriptorSetStorageBuffersDynamic_ ) - , maxDescriptorSetSampledImages( maxDescriptorSetSampledImages_ ) - , maxDescriptorSetStorageImages( maxDescriptorSetStorageImages_ ) - , maxDescriptorSetInputAttachments( maxDescriptorSetInputAttachments_ ) - , maxVertexInputAttributes( maxVertexInputAttributes_ ) - , maxVertexInputBindings( maxVertexInputBindings_ ) - , maxVertexInputAttributeOffset( maxVertexInputAttributeOffset_ ) - , maxVertexInputBindingStride( maxVertexInputBindingStride_ ) - , maxVertexOutputComponents( maxVertexOutputComponents_ ) - , maxTessellationGenerationLevel( maxTessellationGenerationLevel_ ) - , maxTessellationPatchSize( maxTessellationPatchSize_ ) - , maxTessellationControlPerVertexInputComponents( maxTessellationControlPerVertexInputComponents_ ) - , maxTessellationControlPerVertexOutputComponents( maxTessellationControlPerVertexOutputComponents_ ) - , maxTessellationControlPerPatchOutputComponents( maxTessellationControlPerPatchOutputComponents_ ) - , maxTessellationControlTotalOutputComponents( maxTessellationControlTotalOutputComponents_ ) - , maxTessellationEvaluationInputComponents( maxTessellationEvaluationInputComponents_ ) - , maxTessellationEvaluationOutputComponents( maxTessellationEvaluationOutputComponents_ ) - , maxGeometryShaderInvocations( maxGeometryShaderInvocations_ ) - , maxGeometryInputComponents( maxGeometryInputComponents_ ) - , maxGeometryOutputComponents( maxGeometryOutputComponents_ ) - , maxGeometryOutputVertices( maxGeometryOutputVertices_ ) - , maxGeometryTotalOutputComponents( maxGeometryTotalOutputComponents_ ) - , maxFragmentInputComponents( maxFragmentInputComponents_ ) - , maxFragmentOutputAttachments( maxFragmentOutputAttachments_ ) - , maxFragmentDualSrcAttachments( maxFragmentDualSrcAttachments_ ) - , maxFragmentCombinedOutputResources( maxFragmentCombinedOutputResources_ ) - , maxComputeSharedMemorySize( maxComputeSharedMemorySize_ ) - , maxComputeWorkGroupCount( maxComputeWorkGroupCount_ ) - , maxComputeWorkGroupInvocations( maxComputeWorkGroupInvocations_ ) - , maxComputeWorkGroupSize( maxComputeWorkGroupSize_ ) - , subPixelPrecisionBits( subPixelPrecisionBits_ ) - , subTexelPrecisionBits( subTexelPrecisionBits_ ) - , mipmapPrecisionBits( mipmapPrecisionBits_ ) - , maxDrawIndexedIndexValue( maxDrawIndexedIndexValue_ ) - , maxDrawIndirectCount( maxDrawIndirectCount_ ) - , maxSamplerLodBias( 
maxSamplerLodBias_ ) - , maxSamplerAnisotropy( maxSamplerAnisotropy_ ) - , maxViewports( maxViewports_ ) - , maxViewportDimensions( maxViewportDimensions_ ) - , viewportBoundsRange( viewportBoundsRange_ ) - , viewportSubPixelBits( viewportSubPixelBits_ ) - , minMemoryMapAlignment( minMemoryMapAlignment_ ) - , minTexelBufferOffsetAlignment( minTexelBufferOffsetAlignment_ ) - , minUniformBufferOffsetAlignment( minUniformBufferOffsetAlignment_ ) - , minStorageBufferOffsetAlignment( minStorageBufferOffsetAlignment_ ) - , minTexelOffset( minTexelOffset_ ) - , maxTexelOffset( maxTexelOffset_ ) - , minTexelGatherOffset( minTexelGatherOffset_ ) - , maxTexelGatherOffset( maxTexelGatherOffset_ ) - , minInterpolationOffset( minInterpolationOffset_ ) - , maxInterpolationOffset( maxInterpolationOffset_ ) - , subPixelInterpolationOffsetBits( subPixelInterpolationOffsetBits_ ) - , maxFramebufferWidth( maxFramebufferWidth_ ) - , maxFramebufferHeight( maxFramebufferHeight_ ) - , maxFramebufferLayers( maxFramebufferLayers_ ) - , framebufferColorSampleCounts( framebufferColorSampleCounts_ ) - , framebufferDepthSampleCounts( framebufferDepthSampleCounts_ ) - , framebufferStencilSampleCounts( framebufferStencilSampleCounts_ ) - , framebufferNoAttachmentsSampleCounts( framebufferNoAttachmentsSampleCounts_ ) - , maxColorAttachments( maxColorAttachments_ ) - , sampledImageColorSampleCounts( sampledImageColorSampleCounts_ ) - , sampledImageIntegerSampleCounts( sampledImageIntegerSampleCounts_ ) - , sampledImageDepthSampleCounts( sampledImageDepthSampleCounts_ ) - , sampledImageStencilSampleCounts( sampledImageStencilSampleCounts_ ) - , storageImageSampleCounts( storageImageSampleCounts_ ) - , maxSampleMaskWords( maxSampleMaskWords_ ) - , timestampComputeAndGraphics( timestampComputeAndGraphics_ ) - , timestampPeriod( timestampPeriod_ ) - , maxClipDistances( maxClipDistances_ ) - , maxCullDistances( maxCullDistances_ ) - , maxCombinedClipAndCullDistances( maxCombinedClipAndCullDistances_ ) - , discreteQueuePriorities( discreteQueuePriorities_ ) - , pointSizeRange( pointSizeRange_ ) - , lineWidthRange( lineWidthRange_ ) - , pointSizeGranularity( pointSizeGranularity_ ) - , lineWidthGranularity( lineWidthGranularity_ ) - , strictLines( strictLines_ ) - , standardSampleLocations( standardSampleLocations_ ) - , optimalBufferCopyOffsetAlignment( optimalBufferCopyOffsetAlignment_ ) - , optimalBufferCopyRowPitchAlignment( optimalBufferCopyRowPitchAlignment_ ) - , nonCoherentAtomSize( nonCoherentAtomSize_ ) + : maxImageDimension1D{ maxImageDimension1D_ } + , maxImageDimension2D{ maxImageDimension2D_ } + , maxImageDimension3D{ maxImageDimension3D_ } + , maxImageDimensionCube{ maxImageDimensionCube_ } + , maxImageArrayLayers{ maxImageArrayLayers_ } + , maxTexelBufferElements{ maxTexelBufferElements_ } + , maxUniformBufferRange{ maxUniformBufferRange_ } + , maxStorageBufferRange{ maxStorageBufferRange_ } + , maxPushConstantsSize{ maxPushConstantsSize_ } + , maxMemoryAllocationCount{ maxMemoryAllocationCount_ } + , maxSamplerAllocationCount{ maxSamplerAllocationCount_ } + , bufferImageGranularity{ bufferImageGranularity_ } + , sparseAddressSpaceSize{ sparseAddressSpaceSize_ } + , maxBoundDescriptorSets{ maxBoundDescriptorSets_ } + , maxPerStageDescriptorSamplers{ maxPerStageDescriptorSamplers_ } + , maxPerStageDescriptorUniformBuffers{ maxPerStageDescriptorUniformBuffers_ } + , maxPerStageDescriptorStorageBuffers{ maxPerStageDescriptorStorageBuffers_ } + , maxPerStageDescriptorSampledImages{ maxPerStageDescriptorSampledImages_ 
} + , maxPerStageDescriptorStorageImages{ maxPerStageDescriptorStorageImages_ } + , maxPerStageDescriptorInputAttachments{ maxPerStageDescriptorInputAttachments_ } + , maxPerStageResources{ maxPerStageResources_ } + , maxDescriptorSetSamplers{ maxDescriptorSetSamplers_ } + , maxDescriptorSetUniformBuffers{ maxDescriptorSetUniformBuffers_ } + , maxDescriptorSetUniformBuffersDynamic{ maxDescriptorSetUniformBuffersDynamic_ } + , maxDescriptorSetStorageBuffers{ maxDescriptorSetStorageBuffers_ } + , maxDescriptorSetStorageBuffersDynamic{ maxDescriptorSetStorageBuffersDynamic_ } + , maxDescriptorSetSampledImages{ maxDescriptorSetSampledImages_ } + , maxDescriptorSetStorageImages{ maxDescriptorSetStorageImages_ } + , maxDescriptorSetInputAttachments{ maxDescriptorSetInputAttachments_ } + , maxVertexInputAttributes{ maxVertexInputAttributes_ } + , maxVertexInputBindings{ maxVertexInputBindings_ } + , maxVertexInputAttributeOffset{ maxVertexInputAttributeOffset_ } + , maxVertexInputBindingStride{ maxVertexInputBindingStride_ } + , maxVertexOutputComponents{ maxVertexOutputComponents_ } + , maxTessellationGenerationLevel{ maxTessellationGenerationLevel_ } + , maxTessellationPatchSize{ maxTessellationPatchSize_ } + , maxTessellationControlPerVertexInputComponents{ maxTessellationControlPerVertexInputComponents_ } + , maxTessellationControlPerVertexOutputComponents{ maxTessellationControlPerVertexOutputComponents_ } + , maxTessellationControlPerPatchOutputComponents{ maxTessellationControlPerPatchOutputComponents_ } + , maxTessellationControlTotalOutputComponents{ maxTessellationControlTotalOutputComponents_ } + , maxTessellationEvaluationInputComponents{ maxTessellationEvaluationInputComponents_ } + , maxTessellationEvaluationOutputComponents{ maxTessellationEvaluationOutputComponents_ } + , maxGeometryShaderInvocations{ maxGeometryShaderInvocations_ } + , maxGeometryInputComponents{ maxGeometryInputComponents_ } + , maxGeometryOutputComponents{ maxGeometryOutputComponents_ } + , maxGeometryOutputVertices{ maxGeometryOutputVertices_ } + , maxGeometryTotalOutputComponents{ maxGeometryTotalOutputComponents_ } + , maxFragmentInputComponents{ maxFragmentInputComponents_ } + , maxFragmentOutputAttachments{ maxFragmentOutputAttachments_ } + , maxFragmentDualSrcAttachments{ maxFragmentDualSrcAttachments_ } + , maxFragmentCombinedOutputResources{ maxFragmentCombinedOutputResources_ } + , maxComputeSharedMemorySize{ maxComputeSharedMemorySize_ } + , maxComputeWorkGroupCount{ maxComputeWorkGroupCount_ } + , maxComputeWorkGroupInvocations{ maxComputeWorkGroupInvocations_ } + , maxComputeWorkGroupSize{ maxComputeWorkGroupSize_ } + , subPixelPrecisionBits{ subPixelPrecisionBits_ } + , subTexelPrecisionBits{ subTexelPrecisionBits_ } + , mipmapPrecisionBits{ mipmapPrecisionBits_ } + , maxDrawIndexedIndexValue{ maxDrawIndexedIndexValue_ } + , maxDrawIndirectCount{ maxDrawIndirectCount_ } + , maxSamplerLodBias{ maxSamplerLodBias_ } + , maxSamplerAnisotropy{ maxSamplerAnisotropy_ } + , maxViewports{ maxViewports_ } + , maxViewportDimensions{ maxViewportDimensions_ } + , viewportBoundsRange{ viewportBoundsRange_ } + , viewportSubPixelBits{ viewportSubPixelBits_ } + , minMemoryMapAlignment{ minMemoryMapAlignment_ } + , minTexelBufferOffsetAlignment{ minTexelBufferOffsetAlignment_ } + , minUniformBufferOffsetAlignment{ minUniformBufferOffsetAlignment_ } + , minStorageBufferOffsetAlignment{ minStorageBufferOffsetAlignment_ } + , minTexelOffset{ minTexelOffset_ } + , maxTexelOffset{ maxTexelOffset_ } + , 
minTexelGatherOffset{ minTexelGatherOffset_ } + , maxTexelGatherOffset{ maxTexelGatherOffset_ } + , minInterpolationOffset{ minInterpolationOffset_ } + , maxInterpolationOffset{ maxInterpolationOffset_ } + , subPixelInterpolationOffsetBits{ subPixelInterpolationOffsetBits_ } + , maxFramebufferWidth{ maxFramebufferWidth_ } + , maxFramebufferHeight{ maxFramebufferHeight_ } + , maxFramebufferLayers{ maxFramebufferLayers_ } + , framebufferColorSampleCounts{ framebufferColorSampleCounts_ } + , framebufferDepthSampleCounts{ framebufferDepthSampleCounts_ } + , framebufferStencilSampleCounts{ framebufferStencilSampleCounts_ } + , framebufferNoAttachmentsSampleCounts{ framebufferNoAttachmentsSampleCounts_ } + , maxColorAttachments{ maxColorAttachments_ } + , sampledImageColorSampleCounts{ sampledImageColorSampleCounts_ } + , sampledImageIntegerSampleCounts{ sampledImageIntegerSampleCounts_ } + , sampledImageDepthSampleCounts{ sampledImageDepthSampleCounts_ } + , sampledImageStencilSampleCounts{ sampledImageStencilSampleCounts_ } + , storageImageSampleCounts{ storageImageSampleCounts_ } + , maxSampleMaskWords{ maxSampleMaskWords_ } + , timestampComputeAndGraphics{ timestampComputeAndGraphics_ } + , timestampPeriod{ timestampPeriod_ } + , maxClipDistances{ maxClipDistances_ } + , maxCullDistances{ maxCullDistances_ } + , maxCombinedClipAndCullDistances{ maxCombinedClipAndCullDistances_ } + , discreteQueuePriorities{ discreteQueuePriorities_ } + , pointSizeRange{ pointSizeRange_ } + , lineWidthRange{ lineWidthRange_ } + , pointSizeGranularity{ pointSizeGranularity_ } + , lineWidthGranularity{ lineWidthGranularity_ } + , strictLines{ strictLines_ } + , standardSampleLocations{ standardSampleLocations_ } + , optimalBufferCopyOffsetAlignment{ optimalBufferCopyOffsetAlignment_ } + , optimalBufferCopyRowPitchAlignment{ optimalBufferCopyRowPitchAlignment_ } + , nonCoherentAtomSize{ nonCoherentAtomSize_ } { } @@ -71004,6 +71945,770 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize nonCoherentAtomSize = {}; }; + struct PhysicalDeviceSparseProperties + { + using NativeType = VkPhysicalDeviceSparseProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties( VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 residencyAlignedMipSize_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict_ = {} ) VULKAN_HPP_NOEXCEPT + : residencyStandard2DBlockShape{ residencyStandard2DBlockShape_ } + , residencyStandard2DMultisampleBlockShape{ residencyStandard2DMultisampleBlockShape_ } + , residencyStandard3DBlockShape{ residencyStandard3DBlockShape_ } + , residencyAlignedMipSize{ residencyAlignedMipSize_ } + , residencyNonResidentStrict{ residencyNonResidentStrict_ } + { + } + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSparseProperties( VkPhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceSparseProperties( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceSparseProperties & operator=( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceSparseProperties & operator=( VkPhysicalDeviceSparseProperties const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + operator VkPhysicalDeviceSparseProperties const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSparseProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( residencyStandard2DBlockShape, + residencyStandard2DMultisampleBlockShape, + residencyStandard3DBlockShape, + residencyAlignedMipSize, + residencyNonResidentStrict ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceSparseProperties const & ) const = default; +#else + bool operator==( PhysicalDeviceSparseProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( residencyStandard2DBlockShape == rhs.residencyStandard2DBlockShape ) && + ( residencyStandard2DMultisampleBlockShape == rhs.residencyStandard2DMultisampleBlockShape ) && + ( residencyStandard3DBlockShape == rhs.residencyStandard3DBlockShape ) && ( residencyAlignedMipSize == rhs.residencyAlignedMipSize ) && + ( residencyNonResidentStrict == rhs.residencyNonResidentStrict ); +# endif + } + + bool operator!=( PhysicalDeviceSparseProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyAlignedMipSize = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict = {}; + }; + + struct PhysicalDeviceProperties + { + using NativeType = VkPhysicalDeviceProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties( uint32_t apiVersion_ = {}, + uint32_t driverVersion_ = {}, + uint32_t vendorID_ = {}, + uint32_t deviceID_ = {}, + VULKAN_HPP_NAMESPACE::PhysicalDeviceType deviceType_ = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther, + std::array const & deviceName_ = {}, + std::array const & pipelineCacheUUID_ = {}, + VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits limits_ = {}, + VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties sparseProperties_ = {} ) VULKAN_HPP_NOEXCEPT + : apiVersion{ apiVersion_ } + , driverVersion{ driverVersion_ } + , vendorID{ vendorID_ } + , deviceID{ deviceID_ } + , deviceType{ deviceType_ } + , deviceName{ deviceName_ } + , pipelineCacheUUID{ pipelineCacheUUID_ } + , limits{ limits_ } + , sparseProperties{ sparseProperties_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceProperties( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceProperties( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceProperties & operator=( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceProperties & operator=( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + operator VkPhysicalDeviceProperties const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this 
); + } + + operator VkPhysicalDeviceProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple const &, + VULKAN_HPP_NAMESPACE::ArrayWrapper1D const &, + VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits const &, + VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties const &> +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( apiVersion, driverVersion, vendorID, deviceID, deviceType, deviceName, pipelineCacheUUID, limits, sparseProperties ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + std::partial_ordering operator<=>( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + if ( auto cmp = apiVersion <=> rhs.apiVersion; cmp != 0 ) + return cmp; + if ( auto cmp = driverVersion <=> rhs.driverVersion; cmp != 0 ) + return cmp; + if ( auto cmp = vendorID <=> rhs.vendorID; cmp != 0 ) + return cmp; + if ( auto cmp = deviceID <=> rhs.deviceID; cmp != 0 ) + return cmp; + if ( auto cmp = deviceType <=> rhs.deviceType; cmp != 0 ) + return cmp; + if ( auto cmp = strcmp( deviceName, rhs.deviceName ); cmp != 0 ) + return ( cmp < 0 ) ? std::partial_ordering::less : std::partial_ordering::greater; + if ( auto cmp = pipelineCacheUUID <=> rhs.pipelineCacheUUID; cmp != 0 ) + return cmp; + if ( auto cmp = limits <=> rhs.limits; cmp != 0 ) + return cmp; + if ( auto cmp = sparseProperties <=> rhs.sparseProperties; cmp != 0 ) + return cmp; + + return std::partial_ordering::equivalent; + } +#endif + + bool operator==( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( apiVersion == rhs.apiVersion ) && ( driverVersion == rhs.driverVersion ) && ( vendorID == rhs.vendorID ) && ( deviceID == rhs.deviceID ) && + ( deviceType == rhs.deviceType ) && ( strcmp( deviceName, rhs.deviceName ) == 0 ) && ( pipelineCacheUUID == rhs.pipelineCacheUUID ) && + ( limits == rhs.limits ) && ( sparseProperties == rhs.sparseProperties ); + } + + bool operator!=( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } + + public: + uint32_t apiVersion = {}; + uint32_t driverVersion = {}; + uint32_t vendorID = {}; + uint32_t deviceID = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceType deviceType = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D deviceName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D pipelineCacheUUID = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits limits = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties sparseProperties = {}; + }; + + struct PhysicalDeviceProperties2 + { + using NativeType = VkPhysicalDeviceProperties2; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceProperties2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , properties{ properties_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceProperties2( VkPhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceProperties2( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceProperties2 & operator=( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT 
+
+  struct PhysicalDeviceLayeredApiVulkanPropertiesKHR
+  {
+    using NativeType = VkPhysicalDeviceLayeredApiVulkanPropertiesKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLayeredApiVulkanPropertiesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLayeredApiVulkanPropertiesKHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties_ = {},
+                                                                         void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , properties{ properties_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR_14
+      PhysicalDeviceLayeredApiVulkanPropertiesKHR( PhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLayeredApiVulkanPropertiesKHR( VkPhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceLayeredApiVulkanPropertiesKHR( *reinterpret_cast<PhysicalDeviceLayeredApiVulkanPropertiesKHR const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceLayeredApiVulkanPropertiesKHR & operator=( PhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceLayeredApiVulkanPropertiesKHR & operator=( VkPhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredApiVulkanPropertiesKHR const *>( &rhs );
+      return *this;
+    }
+
+    operator VkPhysicalDeviceLayeredApiVulkanPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLayeredApiVulkanPropertiesKHR *>( this );
+    }
+
+    operator VkPhysicalDeviceLayeredApiVulkanPropertiesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLayeredApiVulkanPropertiesKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, properties );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceLayeredApiVulkanPropertiesKHR const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( properties == rhs.properties );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceLayeredApiVulkanPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLayeredApiVulkanPropertiesKHR;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLayeredApiVulkanPropertiesKHR>
+  {
+    using Type = PhysicalDeviceLayeredApiVulkanPropertiesKHR;
+  };
+
+  struct PhysicalDeviceLayeredDriverPropertiesMSFT
+  {
+    using NativeType = VkPhysicalDeviceLayeredDriverPropertiesMSFT;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLayeredDriverPropertiesMSFT(
+      VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT underlyingAPI_ = VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT::eNone,
+      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , underlyingAPI{ underlyingAPI_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLayeredDriverPropertiesMSFT( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLayeredDriverPropertiesMSFT( VkPhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceLayeredDriverPropertiesMSFT( *reinterpret_cast<PhysicalDeviceLayeredDriverPropertiesMSFT const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceLayeredDriverPropertiesMSFT & operator=( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceLayeredDriverPropertiesMSFT & operator=( VkPhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredDriverPropertiesMSFT const *>( &rhs );
+      return *this;
+    }
+
+    operator VkPhysicalDeviceLayeredDriverPropertiesMSFT const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLayeredDriverPropertiesMSFT *>( this );
+    }
+
+    operator VkPhysicalDeviceLayeredDriverPropertiesMSFT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLayeredDriverPropertiesMSFT *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, underlyingAPI );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceLayeredDriverPropertiesMSFT const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( underlyingAPI == rhs.underlyingAPI );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceLayeredDriverPropertiesMSFT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT underlyingAPI = VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT::eNone;
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT>
+  {
+    using Type = PhysicalDeviceLayeredDriverPropertiesMSFT;
+  };
+
+  struct PhysicalDeviceLegacyDitheringFeaturesEXT
+  {
+    using NativeType = VkPhysicalDeviceLegacyDitheringFeaturesEXT;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLegacyDitheringFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 legacyDithering_ = {},
+                                                                   void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , legacyDithering{ legacyDithering_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLegacyDitheringFeaturesEXT( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLegacyDitheringFeaturesEXT( VkPhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceLegacyDitheringFeaturesEXT( *reinterpret_cast<PhysicalDeviceLegacyDitheringFeaturesEXT const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceLegacyDitheringFeaturesEXT & operator=( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceLegacyDitheringFeaturesEXT & operator=( VkPhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyDitheringFeaturesEXT const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyDitheringFeaturesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyDitheringFeaturesEXT & setLegacyDithering( VULKAN_HPP_NAMESPACE::Bool32 legacyDithering_ ) VULKAN_HPP_NOEXCEPT
+    {
+      legacyDithering = legacyDithering_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceLegacyDitheringFeaturesEXT const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLegacyDitheringFeaturesEXT *>( this );
+    }
+
+    operator VkPhysicalDeviceLegacyDitheringFeaturesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLegacyDitheringFeaturesEXT *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, legacyDithering );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceLegacyDitheringFeaturesEXT const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( legacyDithering == rhs.legacyDithering );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceLegacyDitheringFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 legacyDithering = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT>
+  {
+    using Type = PhysicalDeviceLegacyDitheringFeaturesEXT;
+  };
+
+  struct PhysicalDeviceLegacyVertexAttributesFeaturesEXT
+  {
+    using NativeType = VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLegacyVertexAttributesFeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLegacyVertexAttributesFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 legacyVertexAttributes_ = {},
+                                                                          void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , legacyVertexAttributes{ legacyVertexAttributes_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR
+      PhysicalDeviceLegacyVertexAttributesFeaturesEXT( PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLegacyVertexAttributesFeaturesEXT( VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceLegacyVertexAttributesFeaturesEXT( *reinterpret_cast<PhysicalDeviceLegacyVertexAttributesFeaturesEXT const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceLegacyVertexAttributesFeaturesEXT & operator=( PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceLegacyVertexAttributesFeaturesEXT & operator=( VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesFeaturesEXT const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyVertexAttributesFeaturesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyVertexAttributesFeaturesEXT &
+      setLegacyVertexAttributes( VULKAN_HPP_NAMESPACE::Bool32 legacyVertexAttributes_ ) VULKAN_HPP_NOEXCEPT
+    {
+      legacyVertexAttributes = legacyVertexAttributes_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT *>( this );
+    }
+
+    operator VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, legacyVertexAttributes );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( legacyVertexAttributes == rhs.legacyVertexAttributes );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceLegacyVertexAttributesFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLegacyVertexAttributesFeaturesEXT;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 legacyVertexAttributes = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLegacyVertexAttributesFeaturesEXT>
+  {
+    using Type = PhysicalDeviceLegacyVertexAttributesFeaturesEXT;
+  };
+
+  struct PhysicalDeviceLegacyVertexAttributesPropertiesEXT
+  {
+    using NativeType = VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLegacyVertexAttributesPropertiesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR
+      PhysicalDeviceLegacyVertexAttributesPropertiesEXT( VULKAN_HPP_NAMESPACE::Bool32 nativeUnalignedPerformance_ = {},
+                                                         void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , nativeUnalignedPerformance{ nativeUnalignedPerformance_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR
+      PhysicalDeviceLegacyVertexAttributesPropertiesEXT( PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLegacyVertexAttributesPropertiesEXT( VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceLegacyVertexAttributesPropertiesEXT( *reinterpret_cast<PhysicalDeviceLegacyVertexAttributesPropertiesEXT const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceLegacyVertexAttributesPropertiesEXT &
+      operator=( PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceLegacyVertexAttributesPropertiesEXT & operator=( VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyVertexAttributesPropertiesEXT const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyVertexAttributesPropertiesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLegacyVertexAttributesPropertiesEXT &
+      setNativeUnalignedPerformance( VULKAN_HPP_NAMESPACE::Bool32 nativeUnalignedPerformance_ ) VULKAN_HPP_NOEXCEPT
+    {
+      nativeUnalignedPerformance = nativeUnalignedPerformance_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT *>( this );
+    }
+
+    operator VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, nativeUnalignedPerformance );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( nativeUnalignedPerformance == rhs.nativeUnalignedPerformance );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceLegacyVertexAttributesPropertiesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLegacyVertexAttributesPropertiesEXT;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 nativeUnalignedPerformance = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLegacyVertexAttributesPropertiesEXT>
+  {
+    using Type = PhysicalDeviceLegacyVertexAttributesPropertiesEXT;
+  };
+
   struct PhysicalDeviceLineRasterizationFeaturesKHR
   {
     using NativeType = VkPhysicalDeviceLineRasterizationFeaturesKHR;
@@ -71019,13 +72724,13 @@ namespace VULKAN_HPP_NAMESPACE
                                                              VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines_ = {},
                                                              VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines_ = {},
                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , rectangularLines( rectangularLines_ )
-      , bresenhamLines( bresenhamLines_ )
-      , smoothLines( smoothLines_ )
-      , stippledRectangularLines( stippledRectangularLines_ )
-      , stippledBresenhamLines( stippledBresenhamLines_ )
-      , stippledSmoothLines( stippledSmoothLines_ )
+      : pNext{ pNext_ }
+      , rectangularLines{ rectangularLines_ }
+      , bresenhamLines{ bresenhamLines_ }
+      , smoothLines{ smoothLines_ }
+      , stippledRectangularLines{ stippledRectangularLines_ }
+      , stippledBresenhamLines{ stippledBresenhamLines_ }
+      , stippledSmoothLines{ stippledSmoothLines_ }
     {
     }
 
@@ -71170,8 +72875,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceLineRasterizationPropertiesKHR( uint32_t lineSubPixelPrecisionBits_ = {},
                                                                        void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , lineSubPixelPrecisionBits( lineSubPixelPrecisionBits_ )
+      : pNext{ pNext_ }
+      , lineSubPixelPrecisionBits{ lineSubPixelPrecisionBits_ }
     {
     }
 
@@ -71255,8 +72960,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
    VULKAN_HPP_CONSTEXPR PhysicalDeviceLinearColorAttachmentFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 linearColorAttachment_ = {},
                                                                        void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , linearColorAttachment( linearColorAttachment_ )
+      : pNext{ pNext_ }
+      , linearColorAttachment{ linearColorAttachment_ }
     {
     }
 
@@ -71355,9 +73060,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance3Properties( uint32_t maxPerSetDescriptors_ = {},
                                                                VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize_ = {},
                                                                void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxPerSetDescriptors( maxPerSetDescriptors_ )
-      , maxMemoryAllocationSize( maxMemoryAllocationSize_ )
+      : pNext{ pNext_ }
+      , maxPerSetDescriptors{ maxPerSetDescriptors_ }
+      , maxMemoryAllocationSize{ maxMemoryAllocationSize_ }
     {
     }
 
@@ -71442,8 +73147,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance4Features( VULKAN_HPP_NAMESPACE::Bool32 maintenance4_ = {},
                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maintenance4( maintenance4_ )
+      : pNext{ pNext_ }
+      , maintenance4{ maintenance4_ }
     {
     }
 
@@ -71541,8 +73246,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance4Properties( VULKAN_HPP_NAMESPACE::DeviceSize maxBufferSize_ = {},
                                                                void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxBufferSize( maxBufferSize_ )
+      : pNext{ pNext_ }
+      , maxBufferSize{ maxBufferSize_ }
     {
     }
 
@@ -71625,8 +73330,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance5FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 maintenance5_ = {},
                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maintenance5( maintenance5_ )
+      : pNext{ pNext_ }
+      , maintenance5{ maintenance5_ }
     {
     }
 
@@ -71727,13 +73432,13 @@ namespace VULKAN_HPP_NAMESPACE
                                                                    VULKAN_HPP_NAMESPACE::Bool32 nonStrictSinglePixelWideLinesUseParallelogram_ = {},
                                                                    VULKAN_HPP_NAMESPACE::Bool32 nonStrictWideLinesUseParallelogram_ = {},
                                                                    void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , earlyFragmentMultisampleCoverageAfterSampleCounting( earlyFragmentMultisampleCoverageAfterSampleCounting_ )
-      , earlyFragmentSampleMaskTestBeforeSampleCounting( earlyFragmentSampleMaskTestBeforeSampleCounting_ )
-      , depthStencilSwizzleOneSupport( depthStencilSwizzleOneSupport_ )
-      , polygonModePointSize( polygonModePointSize_ )
-      , nonStrictSinglePixelWideLinesUseParallelogram( nonStrictSinglePixelWideLinesUseParallelogram_ )
-      , nonStrictWideLinesUseParallelogram( nonStrictWideLinesUseParallelogram_ )
+      : pNext{ pNext_ }
+      , earlyFragmentMultisampleCoverageAfterSampleCounting{ earlyFragmentMultisampleCoverageAfterSampleCounting_ }
+      , earlyFragmentSampleMaskTestBeforeSampleCounting{ earlyFragmentSampleMaskTestBeforeSampleCounting_ }
+      , depthStencilSwizzleOneSupport{ depthStencilSwizzleOneSupport_ }
+      , polygonModePointSize{ polygonModePointSize_ }
+      , nonStrictSinglePixelWideLinesUseParallelogram{ nonStrictSinglePixelWideLinesUseParallelogram_ }
+      , nonStrictWideLinesUseParallelogram{ nonStrictWideLinesUseParallelogram_ }
     {
     }
 
@@ -71838,8 +73543,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance6FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 maintenance6_ = {},
                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maintenance6( maintenance6_ )
+      : pNext{ pNext_ }
+      , maintenance6{ maintenance6_ }
     {
     }
 
@@ -71937,10 +73642,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                     uint32_t maxCombinedImageSamplerDescriptorCount_ = {},
                                                                     VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateClampCombinerInputs_ = {},
                                                                     void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , blockTexelViewCompatibleMultipleLayers( blockTexelViewCompatibleMultipleLayers_ )
-      , maxCombinedImageSamplerDescriptorCount( maxCombinedImageSamplerDescriptorCount_ )
-      , fragmentShadingRateClampCombinerInputs( fragmentShadingRateClampCombinerInputs_ )
+      : pNext{ pNext_ }
+      , blockTexelViewCompatibleMultipleLayers{ blockTexelViewCompatibleMultipleLayers_ }
+      , maxCombinedImageSamplerDescriptorCount{ maxCombinedImageSamplerDescriptorCount_ }
+      , fragmentShadingRateClampCombinerInputs{ fragmentShadingRateClampCombinerInputs_ }
     {
     }
 
@@ -72020,6 +73725,232 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = PhysicalDeviceMaintenance6PropertiesKHR;
   };
 
+  struct PhysicalDeviceMaintenance7FeaturesKHR
+  {
+    using NativeType = VkPhysicalDeviceMaintenance7FeaturesKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMaintenance7FeaturesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance7FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 maintenance7_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , maintenance7{ maintenance7_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance7FeaturesKHR( PhysicalDeviceMaintenance7FeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceMaintenance7FeaturesKHR( VkPhysicalDeviceMaintenance7FeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceMaintenance7FeaturesKHR( *reinterpret_cast<PhysicalDeviceMaintenance7FeaturesKHR const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceMaintenance7FeaturesKHR & operator=( PhysicalDeviceMaintenance7FeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceMaintenance7FeaturesKHR & operator=( VkPhysicalDeviceMaintenance7FeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7FeaturesKHR const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMaintenance7FeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMaintenance7FeaturesKHR & setMaintenance7( VULKAN_HPP_NAMESPACE::Bool32 maintenance7_ ) VULKAN_HPP_NOEXCEPT
+    {
+      maintenance7 = maintenance7_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDeviceMaintenance7FeaturesKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceMaintenance7FeaturesKHR *>( this );
+    }
+
+    operator VkPhysicalDeviceMaintenance7FeaturesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceMaintenance7FeaturesKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, maintenance7 );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceMaintenance7FeaturesKHR const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceMaintenance7FeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( maintenance7 == rhs.maintenance7 );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceMaintenance7FeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMaintenance7FeaturesKHR;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 maintenance7 = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance7FeaturesKHR>
+  {
+    using Type = PhysicalDeviceMaintenance7FeaturesKHR;
+  };
+
+  struct PhysicalDeviceMaintenance7PropertiesKHR
+  {
+    using NativeType = VkPhysicalDeviceMaintenance7PropertiesKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMaintenance7PropertiesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance7PropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 robustFragmentShadingRateAttachmentAccess_ = {},
+                                                                  VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilAttachmentAccess_ = {},
+                                                                  uint32_t maxDescriptorSetTotalUniformBuffersDynamic_ = {},
+                                                                  uint32_t maxDescriptorSetTotalStorageBuffersDynamic_ = {},
+                                                                  uint32_t maxDescriptorSetTotalBuffersDynamic_ = {},
+                                                                  uint32_t maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic_ = {},
+                                                                  uint32_t maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic_ = {},
+                                                                  uint32_t maxDescriptorSetUpdateAfterBindTotalBuffersDynamic_ = {},
+                                                                  void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , robustFragmentShadingRateAttachmentAccess{ robustFragmentShadingRateAttachmentAccess_ }
+      , separateDepthStencilAttachmentAccess{ separateDepthStencilAttachmentAccess_ }
+      , maxDescriptorSetTotalUniformBuffersDynamic{ maxDescriptorSetTotalUniformBuffersDynamic_ }
+      , maxDescriptorSetTotalStorageBuffersDynamic{ maxDescriptorSetTotalStorageBuffersDynamic_ }
+      , maxDescriptorSetTotalBuffersDynamic{ maxDescriptorSetTotalBuffersDynamic_ }
+      , maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic{ maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic_ }
+      , maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic{ maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic_ }
+      , maxDescriptorSetUpdateAfterBindTotalBuffersDynamic{ maxDescriptorSetUpdateAfterBindTotalBuffersDynamic_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance7PropertiesKHR( PhysicalDeviceMaintenance7PropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceMaintenance7PropertiesKHR( VkPhysicalDeviceMaintenance7PropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDeviceMaintenance7PropertiesKHR( *reinterpret_cast<PhysicalDeviceMaintenance7PropertiesKHR const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDeviceMaintenance7PropertiesKHR & operator=( PhysicalDeviceMaintenance7PropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDeviceMaintenance7PropertiesKHR & operator=( VkPhysicalDeviceMaintenance7PropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance7PropertiesKHR const *>( &rhs );
+      return *this;
+    }
+
+    operator VkPhysicalDeviceMaintenance7PropertiesKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceMaintenance7PropertiesKHR *>( this );
+    }
+
+    operator VkPhysicalDeviceMaintenance7PropertiesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceMaintenance7PropertiesKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &,
+               void * const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               uint32_t const &,
+               uint32_t const &,
+               uint32_t const &,
+               uint32_t const &,
+               uint32_t const &,
+               uint32_t const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType,
+                       pNext,
+                       robustFragmentShadingRateAttachmentAccess,
+                       separateDepthStencilAttachmentAccess,
+                       maxDescriptorSetTotalUniformBuffersDynamic,
+                       maxDescriptorSetTotalStorageBuffersDynamic,
+                       maxDescriptorSetTotalBuffersDynamic,
+                       maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic,
+                       maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic,
+                       maxDescriptorSetUpdateAfterBindTotalBuffersDynamic );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDeviceMaintenance7PropertiesKHR const & ) const = default;
+#else
+    bool operator==( PhysicalDeviceMaintenance7PropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) &&
+             ( robustFragmentShadingRateAttachmentAccess == rhs.robustFragmentShadingRateAttachmentAccess ) &&
+             ( separateDepthStencilAttachmentAccess == rhs.separateDepthStencilAttachmentAccess ) &&
+             ( maxDescriptorSetTotalUniformBuffersDynamic == rhs.maxDescriptorSetTotalUniformBuffersDynamic ) &&
+             ( maxDescriptorSetTotalStorageBuffersDynamic == rhs.maxDescriptorSetTotalStorageBuffersDynamic ) &&
+             ( maxDescriptorSetTotalBuffersDynamic == rhs.maxDescriptorSetTotalBuffersDynamic ) &&
+             ( maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic ) &&
+             ( maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic ) &&
+             ( maxDescriptorSetUpdateAfterBindTotalBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindTotalBuffersDynamic );
+#  endif
+    }
+
+    bool operator!=( PhysicalDeviceMaintenance7PropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMaintenance7PropertiesKHR;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 robustFragmentShadingRateAttachmentAccess = {};
+    VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilAttachmentAccess = {};
+    uint32_t maxDescriptorSetTotalUniformBuffersDynamic = {};
+    uint32_t maxDescriptorSetTotalStorageBuffersDynamic = {};
+    uint32_t maxDescriptorSetTotalBuffersDynamic = {};
+    uint32_t maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic = {};
+    uint32_t maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic = {};
+    uint32_t maxDescriptorSetUpdateAfterBindTotalBuffersDynamic = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance7PropertiesKHR>
+  {
+    using Type = PhysicalDeviceMaintenance7PropertiesKHR;
+  };
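+
+  // Illustrative sketch (not part of the generated header): in enhanced mode, the templated
+  // getProperties2 overload can return a StructureChain that wires the pNext links itself.
+  // Assuming a vk::PhysicalDevice named `gpu` (hypothetical) whose driver exposes
+  // VK_KHR_maintenance7:
+  //
+  //   auto chain = gpu.getProperties2<vk::PhysicalDeviceProperties2,
+  //                                   vk::PhysicalDeviceMaintenance7PropertiesKHR>();
+  //   auto const & maint7 = chain.get<vk::PhysicalDeviceMaintenance7PropertiesKHR>();
+  //   uint32_t dynBuffers = maint7.maxDescriptorSetTotalBuffersDynamic;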
+
   struct PhysicalDeviceMapMemoryPlacedFeaturesEXT
   {
     using NativeType = VkPhysicalDeviceMapMemoryPlacedFeaturesEXT;
@@ -72032,10 +73963,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                   VULKAN_HPP_NAMESPACE::Bool32 memoryMapRangePlaced_ = {},
                                                                   VULKAN_HPP_NAMESPACE::Bool32 memoryUnmapReserve_ = {},
                                                                   void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , memoryMapPlaced( memoryMapPlaced_ )
-      , memoryMapRangePlaced( memoryMapRangePlaced_ )
-      , memoryUnmapReserve( memoryUnmapReserve_ )
+      : pNext{ pNext_ }
+      , memoryMapPlaced{ memoryMapPlaced_ }
+      , memoryMapRangePlaced{ memoryMapRangePlaced_ }
+      , memoryUnmapReserve{ memoryUnmapReserve_ }
     {
     }
 
@@ -72152,8 +74083,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMapMemoryPlacedPropertiesEXT( VULKAN_HPP_NAMESPACE::DeviceSize minPlacedMemoryMapAlignment_ = {},
                                                                      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , minPlacedMemoryMapAlignment( minPlacedMemoryMapAlignment_ )
+      : pNext{ pNext_ }
+      , minPlacedMemoryMapAlignment{ minPlacedMemoryMapAlignment_ }
     {
     }
 
@@ -72236,9 +74167,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryBudgetPropertiesEXT( std::array<VULKAN_HPP_NAMESPACE::DeviceSize, VK_MAX_MEMORY_HEAPS> const & heapBudget_ = {},
                                                                      std::array<VULKAN_HPP_NAMESPACE::DeviceSize, VK_MAX_MEMORY_HEAPS> const & heapUsage_ = {},
                                                                      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , heapBudget( heapBudget_ )
-      , heapUsage( heapUsage_ )
+      : pNext{ pNext_ }
+      , heapBudget{ heapBudget_ }
+      , heapUsage{ heapUsage_ }
     {
     }
 
@@ -72324,8 +74255,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMemoryDecompressionFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 memoryDecompression_ = {},
                                                                       void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , memoryDecompression( memoryDecompression_ )
+      : pNext{ pNext_ }
+      , memoryDecompression{ memoryDecompression_ }
    {
     }
 
@@ -72423,9 +74354,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMemoryDecompressionPropertiesNV( VULKAN_HPP_NAMESPACE::MemoryDecompressionMethodFlagsNV decompressionMethods_ = {},
                                                                         uint64_t maxDecompressionIndirectCount_ = {},
                                                                         void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , decompressionMethods( decompressionMethods_ )
-      , maxDecompressionIndirectCount( maxDecompressionIndirectCount_ )
+      : pNext{ pNext_ }
+      , decompressionMethods{ decompressionMethods_ }
+      , maxDecompressionIndirectCount{ maxDecompressionIndirectCount_ }
     {
     }
 
@@ -72510,8 +74441,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMemoryPriorityFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 memoryPriority_ = {},
                                                                   void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , memoryPriority( memoryPriority_ )
+      : pNext{ pNext_ }
+      , memoryPriority{ memoryPriority_ }
     {
     }
 
@@ -72607,10 +74538,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                             std::array<VULKAN_HPP_NAMESPACE::MemoryType, VK_MAX_MEMORY_TYPES> const & memoryTypes_ = {},
                                                             uint32_t memoryHeapCount_ = {},
                                                             std::array<VULKAN_HPP_NAMESPACE::MemoryHeap, VK_MAX_MEMORY_HEAPS> const & memoryHeaps_ = {} ) VULKAN_HPP_NOEXCEPT
-      : memoryTypeCount( memoryTypeCount_ )
-      , memoryTypes( memoryTypes_ )
-      , memoryHeapCount( memoryHeapCount_ )
-      , memoryHeaps( memoryHeaps_ )
+      : memoryTypeCount{ memoryTypeCount_ }
+      , memoryTypes{ memoryTypes_ }
+      , memoryHeapCount{ memoryHeapCount_ }
+      , memoryHeaps{ memoryHeaps_ }
     {
     }
 
@@ -72621,19 +74552,6 @@ namespace VULKAN_HPP_NAMESPACE
     {
     }
 
-# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
-    PhysicalDeviceMemoryProperties( VULKAN_HPP_NAMESPACE::ArrayProxy<VULKAN_HPP_NAMESPACE::MemoryType> const & memoryTypes_,
-                                    VULKAN_HPP_NAMESPACE::ArrayProxy<VULKAN_HPP_NAMESPACE::MemoryHeap> const & memoryHeaps_ = {} )
-      : memoryTypeCount( std::min( static_cast<uint32_t>( memoryTypes_.size() ), VK_MAX_MEMORY_TYPES ) )
-      , memoryHeapCount( std::min( static_cast<uint32_t>( memoryHeaps_.size() ), VK_MAX_MEMORY_HEAPS ) )
-    {
-      VULKAN_HPP_ASSERT( memoryTypes_.size() < VK_MAX_MEMORY_TYPES );
-      memcpy( memoryTypes, memoryTypes_.data(), memoryTypeCount * sizeof( VULKAN_HPP_NAMESPACE::MemoryType ) );
-      VULKAN_HPP_ASSERT( memoryHeaps_.size() < VK_MAX_MEMORY_HEAPS );
-      memcpy( memoryHeaps, memoryHeaps_.data(), memoryHeapCount * sizeof( VULKAN_HPP_NAMESPACE::MemoryHeap ) );
-    }
-# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-
     PhysicalDeviceMemoryProperties & operator=( PhysicalDeviceMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
 #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
 
@@ -72720,8 +74638,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties_ = {},
                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , memoryProperties( memoryProperties_ )
+      : pNext{ pNext_ }
+      , memoryProperties{ memoryProperties_ }
     {
     }
 
@@ -72809,12 +74727,12 @@ namespace VULKAN_HPP_NAMESPACE
                                                            VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRateMeshShader_ = {},
                                                            VULKAN_HPP_NAMESPACE::Bool32 meshShaderQueries_ = {},
                                                            void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , taskShader( taskShader_ )
-      , meshShader( meshShader_ )
-      , multiviewMeshShader( multiviewMeshShader_ )
-      , primitiveFragmentShadingRateMeshShader( primitiveFragmentShadingRateMeshShader_ )
-      , meshShaderQueries( meshShaderQueries_ )
+      : pNext{ pNext_ }
+      , taskShader{ taskShader_ }
+      , meshShader{ meshShader_ }
+      , multiviewMeshShader{ multiviewMeshShader_ }
+      , primitiveFragmentShadingRateMeshShader{ primitiveFragmentShadingRateMeshShader_ }
+      , meshShaderQueries{ meshShaderQueries_ }
     {
     }
 
@@ -72949,9 +74867,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMeshShaderFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 taskShader_ = {},
                                                              VULKAN_HPP_NAMESPACE::Bool32 meshShader_ = {},
                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , taskShader( taskShader_ )
-      , meshShader( meshShader_ )
+      : pNext{ pNext_ }
+      , taskShader{ taskShader_ }
+      , meshShader{ meshShader_ }
    {
     }
 
@@ -73081,35 +74999,35 @@ namespace VULKAN_HPP_NAMESPACE
                                                                VULKAN_HPP_NAMESPACE::Bool32 prefersCompactVertexOutput_ = {},
                                                                VULKAN_HPP_NAMESPACE::Bool32 prefersCompactPrimitiveOutput_ = {},
                                                                void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxTaskWorkGroupTotalCount( maxTaskWorkGroupTotalCount_ )
-      , maxTaskWorkGroupCount( maxTaskWorkGroupCount_ )
-      , maxTaskWorkGroupInvocations( maxTaskWorkGroupInvocations_ )
-      , maxTaskWorkGroupSize( maxTaskWorkGroupSize_ )
-      , maxTaskPayloadSize( maxTaskPayloadSize_ )
-      , maxTaskSharedMemorySize( maxTaskSharedMemorySize_ )
-      , maxTaskPayloadAndSharedMemorySize( maxTaskPayloadAndSharedMemorySize_ )
-      , maxMeshWorkGroupTotalCount( maxMeshWorkGroupTotalCount_ )
-      , maxMeshWorkGroupCount( maxMeshWorkGroupCount_ )
-      , maxMeshWorkGroupInvocations( maxMeshWorkGroupInvocations_ )
-      , maxMeshWorkGroupSize( maxMeshWorkGroupSize_ )
-      , maxMeshSharedMemorySize( maxMeshSharedMemorySize_ )
-      , maxMeshPayloadAndSharedMemorySize( maxMeshPayloadAndSharedMemorySize_ )
-      , maxMeshOutputMemorySize( maxMeshOutputMemorySize_ )
-      , maxMeshPayloadAndOutputMemorySize( maxMeshPayloadAndOutputMemorySize_ )
-      , maxMeshOutputComponents( maxMeshOutputComponents_ )
-      , maxMeshOutputVertices( maxMeshOutputVertices_ )
-      , maxMeshOutputPrimitives( maxMeshOutputPrimitives_ )
-      , maxMeshOutputLayers( maxMeshOutputLayers_ )
-      , maxMeshMultiviewViewCount( maxMeshMultiviewViewCount_ )
-      , meshOutputPerVertexGranularity( meshOutputPerVertexGranularity_ )
-      , meshOutputPerPrimitiveGranularity( meshOutputPerPrimitiveGranularity_ )
-      , maxPreferredTaskWorkGroupInvocations( maxPreferredTaskWorkGroupInvocations_ )
-      , maxPreferredMeshWorkGroupInvocations( maxPreferredMeshWorkGroupInvocations_ )
-      , prefersLocalInvocationVertexOutput( prefersLocalInvocationVertexOutput_ )
-      , prefersLocalInvocationPrimitiveOutput( prefersLocalInvocationPrimitiveOutput_ )
-      , prefersCompactVertexOutput( prefersCompactVertexOutput_ )
-      , prefersCompactPrimitiveOutput( prefersCompactPrimitiveOutput_ )
+      : pNext{ pNext_ }
+      , maxTaskWorkGroupTotalCount{ maxTaskWorkGroupTotalCount_ }
+      , maxTaskWorkGroupCount{ maxTaskWorkGroupCount_ }
+      , maxTaskWorkGroupInvocations{ maxTaskWorkGroupInvocations_ }
+      , maxTaskWorkGroupSize{ maxTaskWorkGroupSize_ }
+      , maxTaskPayloadSize{ maxTaskPayloadSize_ }
+      , maxTaskSharedMemorySize{ maxTaskSharedMemorySize_ }
+      , maxTaskPayloadAndSharedMemorySize{ maxTaskPayloadAndSharedMemorySize_ }
+      , maxMeshWorkGroupTotalCount{ maxMeshWorkGroupTotalCount_ }
+      , maxMeshWorkGroupCount{ maxMeshWorkGroupCount_ }
+      , maxMeshWorkGroupInvocations{ maxMeshWorkGroupInvocations_ }
+      , maxMeshWorkGroupSize{ maxMeshWorkGroupSize_ }
+      , maxMeshSharedMemorySize{ maxMeshSharedMemorySize_ }
+      , maxMeshPayloadAndSharedMemorySize{ maxMeshPayloadAndSharedMemorySize_ }
+      , maxMeshOutputMemorySize{ maxMeshOutputMemorySize_ }
+      , maxMeshPayloadAndOutputMemorySize{ maxMeshPayloadAndOutputMemorySize_ }
+      , maxMeshOutputComponents{ maxMeshOutputComponents_ }
+      , maxMeshOutputVertices{ maxMeshOutputVertices_ }
+      , maxMeshOutputPrimitives{ maxMeshOutputPrimitives_ }
+      , maxMeshOutputLayers{ maxMeshOutputLayers_ }
+      , maxMeshMultiviewViewCount{ maxMeshMultiviewViewCount_ }
+      , meshOutputPerVertexGranularity{ meshOutputPerVertexGranularity_ }
+      , meshOutputPerPrimitiveGranularity{ meshOutputPerPrimitiveGranularity_ }
+      , maxPreferredTaskWorkGroupInvocations{ maxPreferredTaskWorkGroupInvocations_ }
+      , maxPreferredMeshWorkGroupInvocations{ maxPreferredMeshWorkGroupInvocations_ }
+      , prefersLocalInvocationVertexOutput{ prefersLocalInvocationVertexOutput_ }
+      , prefersLocalInvocationPrimitiveOutput{ prefersLocalInvocationPrimitiveOutput_ }
+      , prefersCompactVertexOutput{ prefersCompactVertexOutput_ }
+      , prefersCompactPrimitiveOutput{ prefersCompactPrimitiveOutput_ }
     {
     }
 
@@ -73304,20 +75222,20 @@ namespace VULKAN_HPP_NAMESPACE
                                                               uint32_t meshOutputPerVertexGranularity_ = {},
                                                               uint32_t meshOutputPerPrimitiveGranularity_ = {},
                                                               void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxDrawMeshTasksCount( maxDrawMeshTasksCount_ )
-      , maxTaskWorkGroupInvocations( maxTaskWorkGroupInvocations_ )
-      , maxTaskWorkGroupSize( maxTaskWorkGroupSize_ )
-      , maxTaskTotalMemorySize( maxTaskTotalMemorySize_ )
-      , maxTaskOutputCount( maxTaskOutputCount_ )
-      , maxMeshWorkGroupInvocations( maxMeshWorkGroupInvocations_ )
-      , maxMeshWorkGroupSize( maxMeshWorkGroupSize_ )
-      , maxMeshTotalMemorySize( maxMeshTotalMemorySize_ )
-      , maxMeshOutputVertices( maxMeshOutputVertices_ )
-      , maxMeshOutputPrimitives( maxMeshOutputPrimitives_ )
-      , maxMeshMultiviewViewCount( maxMeshMultiviewViewCount_ )
-      , meshOutputPerVertexGranularity( meshOutputPerVertexGranularity_ )
-      , meshOutputPerPrimitiveGranularity( meshOutputPerPrimitiveGranularity_ )
+      : pNext{ pNext_ }
+      , maxDrawMeshTasksCount{ maxDrawMeshTasksCount_ }
+      , maxTaskWorkGroupInvocations{ maxTaskWorkGroupInvocations_ }
+      , maxTaskWorkGroupSize{ maxTaskWorkGroupSize_ }
+      , maxTaskTotalMemorySize{ maxTaskTotalMemorySize_ }
+      , maxTaskOutputCount{ maxTaskOutputCount_ }
+      , maxMeshWorkGroupInvocations{ maxMeshWorkGroupInvocations_ }
+      , maxMeshWorkGroupSize{ maxMeshWorkGroupSize_ }
+      , maxMeshTotalMemorySize{ maxMeshTotalMemorySize_ }
+      , maxMeshOutputVertices{ maxMeshOutputVertices_ }
+      , maxMeshOutputPrimitives{ maxMeshOutputPrimitives_ }
+      , maxMeshMultiviewViewCount{ maxMeshMultiviewViewCount_ }
+      , meshOutputPerVertexGranularity{ meshOutputPerVertexGranularity_ }
+      , meshOutputPerPrimitiveGranularity{ meshOutputPerPrimitiveGranularity_ }
     {
     }
 
@@ -73445,8 +75363,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiDrawFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 multiDraw_ = {},
                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , multiDraw( multiDraw_ )
+      : pNext{ pNext_ }
+      , multiDraw{ multiDraw_ }
     {
     }
 
@@ -73541,8 +75459,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiDrawPropertiesEXT( uint32_t maxMultiDrawCount_ = {},
                                                                void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxMultiDrawCount( maxMultiDrawCount_ )
+      : pNext{ pNext_ }
+      , maxMultiDrawCount{ maxMultiDrawCount_ }
     {
     }
 
@@ -73624,8 +75542,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 multisampledRenderToSingleSampled_ = {},
                                                                                      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , multisampledRenderToSingleSampled( multisampledRenderToSingleSampled_ )
+      : pNext{ pNext_ }
+      , multisampledRenderToSingleSampled{ multisampledRenderToSingleSampled_ }
     {
     }
 
@@ -73728,10 +75646,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                           VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader_ = {},
                                                           VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader_ = {},
                                                           void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , multiview( multiview_ )
-      , multiviewGeometryShader( multiviewGeometryShader_ )
-      , multiviewTessellationShader( multiviewTessellationShader_ )
+      : pNext{ pNext_ }
+      , multiview{ multiview_ }
+      , multiviewGeometryShader{ multiviewGeometryShader_ }
+      , multiviewTessellationShader{ multiviewTessellationShader_ }
     {
     }
 
@@ -73850,8 +75768,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX( VULKAN_HPP_NAMESPACE::Bool32 perViewPositionAllComponents_ = {},
                                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , perViewPositionAllComponents( perViewPositionAllComponents_ )
+      : pNext{ pNext_ }
+      , perViewPositionAllComponents{ perViewPositionAllComponents_ }
     {
     }
 
@@ -73935,8 +75853,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 multiviewPerViewRenderAreas_ = {},
                                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , multiviewPerViewRenderAreas( multiviewPerViewRenderAreas_ )
+      : pNext{ pNext_ }
+      , multiviewPerViewRenderAreas{ multiviewPerViewRenderAreas_ }
     {
     }
 
@@ -74035,8 +75953,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 multiviewPerViewViewports_ = {},
                                                                               void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , multiviewPerViewViewports( multiviewPerViewViewports_ )
+      : pNext{ pNext_ }
+      , multiviewPerViewViewports{ multiviewPerViewViewports_ }
     {
     }
 
@@ -74136,9 +76054,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewProperties( uint32_t maxMultiviewViewCount_ = {},
                                                             uint32_t maxMultiviewInstanceIndex_ = {},
                                                             void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxMultiviewViewCount( maxMultiviewViewCount_ )
-      , maxMultiviewInstanceIndex( maxMultiviewInstanceIndex_ )
+      : pNext{ pNext_ }
+      , maxMultiviewViewCount{ maxMultiviewViewCount_ }
+      , maxMultiviewInstanceIndex{ maxMultiviewInstanceIndex_ }
     {
     }
 
@@ -74224,8 +76142,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceMutableDescriptorTypeFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 mutableDescriptorType_ = {},
                                                                          void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , mutableDescriptorType( mutableDescriptorType_ )
+      : pNext{ pNext_ }
+      , mutableDescriptorType{ mutableDescriptorType_ }
     {
     }
 
@@ -74327,10 +76245,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                      VULKAN_HPP_NAMESPACE::Bool32 nestedCommandBufferRendering_ = {},
                                                                      VULKAN_HPP_NAMESPACE::Bool32 nestedCommandBufferSimultaneousUse_ = {},
                                                                      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , nestedCommandBuffer( nestedCommandBuffer_ )
-      , nestedCommandBufferRendering( nestedCommandBufferRendering_ )
-      , nestedCommandBufferSimultaneousUse( nestedCommandBufferSimultaneousUse_ )
+      : pNext{ pNext_ }
+      , nestedCommandBuffer{ nestedCommandBuffer_ }
+      , nestedCommandBufferRendering{ nestedCommandBufferRendering_ }
+      , nestedCommandBufferSimultaneousUse{ nestedCommandBufferSimultaneousUse_ }
     {
     }
 
@@ -74449,8 +76367,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceNestedCommandBufferPropertiesEXT( uint32_t maxCommandBufferNestingLevel_ = {},
                                                                          void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxCommandBufferNestingLevel( maxCommandBufferNestingLevel_ )
+      : pNext{ pNext_ }
+      , maxCommandBufferNestingLevel{ maxCommandBufferNestingLevel_ }
     {
     }
 
@@ -74548,8 +76466,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceNonSeamlessCubeMapFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 nonSeamlessCubeMap_ = {},
                                                                       void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , nonSeamlessCubeMap( nonSeamlessCubeMap_ )
+      : pNext{ pNext_ }
+      , nonSeamlessCubeMap{ nonSeamlessCubeMap_ }
    {
     }
 
@@ -74648,10 +76566,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                 VULKAN_HPP_NAMESPACE::Bool32 micromapCaptureReplay_ = {},
                                                                 VULKAN_HPP_NAMESPACE::Bool32 micromapHostCommands_ = {},
                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , micromap( micromap_ )
-      , micromapCaptureReplay( micromapCaptureReplay_ )
-      , micromapHostCommands( micromapHostCommands_ )
+      : pNext{ pNext_ }
+      , micromap{ micromap_ }
+      , micromapCaptureReplay{ micromapCaptureReplay_ }
+      , micromapHostCommands{ micromapHostCommands_ }
     {
     }
 
@@ -74769,9 +76687,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceOpacityMicromapPropertiesEXT( uint32_t maxOpacity2StateSubdivisionLevel_ = {},
                                                                      uint32_t maxOpacity4StateSubdivisionLevel_ = {},
                                                                      void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxOpacity2StateSubdivisionLevel( maxOpacity2StateSubdivisionLevel_ )
-      , maxOpacity4StateSubdivisionLevel( maxOpacity4StateSubdivisionLevel_ )
+      : pNext{ pNext_ }
+      , maxOpacity2StateSubdivisionLevel{ maxOpacity2StateSubdivisionLevel_ }
+      , maxOpacity4StateSubdivisionLevel{ maxOpacity4StateSubdivisionLevel_ }
     {
     }
 
@@ -74854,8 +76772,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceOpticalFlowFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 opticalFlow_ = {},
                                                               void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , opticalFlow( opticalFlow_ )
+      : pNext{ pNext_ }
+      , opticalFlow{ opticalFlow_ }
     {
     }
 
@@ -74961,18 +76879,18 @@ namespace VULKAN_HPP_NAMESPACE
                                                                 uint32_t maxHeight_ = {},
                                                                 uint32_t maxNumRegionsOfInterest_ = {},
                                                                 void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , supportedOutputGridSizes( supportedOutputGridSizes_ )
-      , supportedHintGridSizes( supportedHintGridSizes_ )
-      , hintSupported( hintSupported_ )
-      , costSupported( costSupported_ )
-      , bidirectionalFlowSupported( bidirectionalFlowSupported_ )
-      , globalFlowSupported( globalFlowSupported_ )
-      , minWidth( minWidth_ )
-      , minHeight( minHeight_ )
-      , maxWidth( maxWidth_ )
-      , maxHeight( maxHeight_ )
-      , maxNumRegionsOfInterest( maxNumRegionsOfInterest_ )
+      : pNext{ pNext_ }
+      , supportedOutputGridSizes{ supportedOutputGridSizes_ }
+      , supportedHintGridSizes{ supportedHintGridSizes_ }
+      , hintSupported{ hintSupported_ }
+      , costSupported{ costSupported_ }
+      , bidirectionalFlowSupported{ bidirectionalFlowSupported_ }
+      , globalFlowSupported{ globalFlowSupported_ }
+      , minWidth{ minWidth_ }
+      , minHeight{ minHeight_ }
+      , maxWidth{ maxWidth_ }
+      , maxHeight{ maxHeight_ }
+      , maxNumRegionsOfInterest{ maxNumRegionsOfInterest_ }
    {
     }
 
@@ -75092,11 +77010,11 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDevicePCIBusInfoPropertiesEXT( uint32_t pciDomain_ = {}, uint32_t pciBus_ = {},
                                                                 uint32_t pciDevice_ = {}, uint32_t pciFunction_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , pciDomain( pciDomain_ )
-      , pciBus( pciBus_ )
-      , pciDevice( pciDevice_ )
-      , pciFunction( pciFunction_ )
+      : pNext{ pNext_ }
+      , pciDomain{ pciDomain_ }
+      , pciBus{ pciBus_ }
+      , pciDevice{ pciDevice_ }
+      , pciFunction{ pciFunction_ }
     {
     }
 
@@ -75182,8 +77100,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDevicePageableDeviceLocalMemoryFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 pageableDeviceLocalMemory_ = {},
                                                                              void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , pageableDeviceLocalMemory( pageableDeviceLocalMemory_ )
+      : pNext{ pNext_ }
+      , pageableDeviceLocalMemory{ pageableDeviceLocalMemory_ }
    {
     }
 
@@ -75283,9 +77201,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDevicePerStageDescriptorSetFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 perStageDescriptorSet_ = {},
                                                                         VULKAN_HPP_NAMESPACE::Bool32 dynamicPipelineLayout_ = {},
                                                                         void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , perStageDescriptorSet( perStageDescriptorSet_ )
-      , dynamicPipelineLayout( dynamicPipelineLayout_ )
+      : pNext{ pNext_ }
+      , perStageDescriptorSet{ perStageDescriptorSet_ }
+      , dynamicPipelineLayout{ dynamicPipelineLayout_ }
     {
     }
 
@@ -75393,9 +77311,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 performanceCounterQueryPools_ = {},
                                                                     VULKAN_HPP_NAMESPACE::Bool32 performanceCounterMultipleQueryPools_ = {},
                                                                     void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , performanceCounterQueryPools( performanceCounterQueryPools_ )
-      , performanceCounterMultipleQueryPools( performanceCounterMultipleQueryPools_ )
+      : pNext{ pNext_ }
+      , performanceCounterQueryPools{ performanceCounterQueryPools_ }
+      , performanceCounterMultipleQueryPools{ performanceCounterMultipleQueryPools_ }
     {
     }
 
@@ -75501,8 +77419,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryPropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 allowCommandBufferQueryCopies_ = {},
                                                                       void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , allowCommandBufferQueryCopies( allowCommandBufferQueryCopies_ )
+      : pNext{ pNext_ }
+      , allowCommandBufferQueryCopies{ allowCommandBufferQueryCopies_ }
     {
     }
 
@@ -75574,6 +77492,257 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = PhysicalDevicePerformanceQueryPropertiesKHR;
   };
 
+  struct PhysicalDevicePipelineBinaryFeaturesKHR
+  {
+    using NativeType = VkPhysicalDevicePipelineBinaryFeaturesKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePipelineBinaryFeaturesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineBinaryFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaries_ = {},
+                                                                  void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , pipelineBinaries{ pipelineBinaries_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineBinaryFeaturesKHR( PhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDevicePipelineBinaryFeaturesKHR( VkPhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDevicePipelineBinaryFeaturesKHR( *reinterpret_cast<PhysicalDevicePipelineBinaryFeaturesKHR const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDevicePipelineBinaryFeaturesKHR & operator=( PhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDevicePipelineBinaryFeaturesKHR & operator=( VkPhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryFeaturesKHR const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryFeaturesKHR & setPipelineBinaries( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaries_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaries = pipelineBinaries_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDevicePipelineBinaryFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDevicePipelineBinaryFeaturesKHR *>( this );
+    }
+
+    operator VkPhysicalDevicePipelineBinaryFeaturesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDevicePipelineBinaryFeaturesKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, pipelineBinaries );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDevicePipelineBinaryFeaturesKHR const & ) const = default;
+#else
+    bool operator==( PhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pipelineBinaries == rhs.pipelineBinaries );
+#  endif
+    }
+
+    bool operator!=( PhysicalDevicePipelineBinaryFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePipelineBinaryFeaturesKHR;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaries = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineBinaryFeaturesKHR>
+  {
+    using Type = PhysicalDevicePipelineBinaryFeaturesKHR;
+  };
+
+  struct PhysicalDevicePipelineBinaryPropertiesKHR
+  {
+    using NativeType = VkPhysicalDevicePipelineBinaryPropertiesKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePipelineBinaryPropertiesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineBinaryPropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCache_ = {},
+                                                                    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCacheControl_ = {},
+                                                                    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrefersInternalCache_ = {},
+                                                                    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrecompiledInternalCache_ = {},
+                                                                    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryCompressedData_ = {},
+                                                                    void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , pipelineBinaryInternalCache{ pipelineBinaryInternalCache_ }
+      , pipelineBinaryInternalCacheControl{ pipelineBinaryInternalCacheControl_ }
+      , pipelineBinaryPrefersInternalCache{ pipelineBinaryPrefersInternalCache_ }
+      , pipelineBinaryPrecompiledInternalCache{ pipelineBinaryPrecompiledInternalCache_ }
+      , pipelineBinaryCompressedData{ pipelineBinaryCompressedData_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineBinaryPropertiesKHR( PhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDevicePipelineBinaryPropertiesKHR( VkPhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : PhysicalDevicePipelineBinaryPropertiesKHR( *reinterpret_cast<PhysicalDevicePipelineBinaryPropertiesKHR const *>( &rhs ) )
+    {
+    }
+
+    PhysicalDevicePipelineBinaryPropertiesKHR & operator=( PhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    PhysicalDevicePipelineBinaryPropertiesKHR & operator=( VkPhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineBinaryPropertiesKHR const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR &
+      setPipelineBinaryInternalCache( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCache_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaryInternalCache = pipelineBinaryInternalCache_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR &
+      setPipelineBinaryInternalCacheControl( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCacheControl_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaryInternalCacheControl = pipelineBinaryInternalCacheControl_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR &
+      setPipelineBinaryPrefersInternalCache( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrefersInternalCache_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaryPrefersInternalCache = pipelineBinaryPrefersInternalCache_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR &
+      setPipelineBinaryPrecompiledInternalCache( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrecompiledInternalCache_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaryPrecompiledInternalCache = pipelineBinaryPrecompiledInternalCache_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDevicePipelineBinaryPropertiesKHR &
+      setPipelineBinaryCompressedData( VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryCompressedData_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipelineBinaryCompressedData = pipelineBinaryCompressedData_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkPhysicalDevicePipelineBinaryPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDevicePipelineBinaryPropertiesKHR *>( this );
+    }
+
+    operator VkPhysicalDevicePipelineBinaryPropertiesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDevicePipelineBinaryPropertiesKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &,
+               void * const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &,
+               VULKAN_HPP_NAMESPACE::Bool32 const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType,
+                       pNext,
+                       pipelineBinaryInternalCache,
+                       pipelineBinaryInternalCacheControl,
+                       pipelineBinaryPrefersInternalCache,
+                       pipelineBinaryPrecompiledInternalCache,
+                       pipelineBinaryCompressedData );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( PhysicalDevicePipelineBinaryPropertiesKHR const & ) const = default;
+#else
+    bool operator==( PhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pipelineBinaryInternalCache == rhs.pipelineBinaryInternalCache ) &&
+             ( pipelineBinaryInternalCacheControl == rhs.pipelineBinaryInternalCacheControl ) &&
+             ( pipelineBinaryPrefersInternalCache == rhs.pipelineBinaryPrefersInternalCache ) &&
+             ( pipelineBinaryPrecompiledInternalCache == rhs.pipelineBinaryPrecompiledInternalCache ) &&
+             ( pipelineBinaryCompressedData == rhs.pipelineBinaryCompressedData );
+#  endif
+    }
+
+    bool operator!=( PhysicalDevicePipelineBinaryPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePipelineBinaryPropertiesKHR;
+    void * pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCache = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryInternalCacheControl = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrefersInternalCache = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryPrecompiledInternalCache = {};
+    VULKAN_HPP_NAMESPACE::Bool32 pipelineBinaryCompressedData = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineBinaryPropertiesKHR>
+  {
+    using Type = PhysicalDevicePipelineBinaryPropertiesKHR;
+  };
VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineCreationCacheControlFeatures( VULKAN_HPP_NAMESPACE::Bool32 pipelineCreationCacheControl_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineCreationCacheControl( pipelineCreationCacheControl_ ) + : pNext{ pNext_ } + , pipelineCreationCacheControl{ pipelineCreationCacheControl_ } { } @@ -75686,8 +77855,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineExecutablePropertiesFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 pipelineExecutableInfo_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineExecutableInfo( pipelineExecutableInfo_ ) + : pNext{ pNext_ } + , pipelineExecutableInfo{ pipelineExecutableInfo_ } { } @@ -75786,8 +77955,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 pipelineLibraryGroupHandles_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineLibraryGroupHandles( pipelineLibraryGroupHandles_ ) + : pNext{ pNext_ } + , pipelineLibraryGroupHandles{ pipelineLibraryGroupHandles_ } { } @@ -75886,8 +78055,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePipelinePropertiesFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 pipelinePropertiesIdentifier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelinePropertiesIdentifier( pipelinePropertiesIdentifier_ ) + : pNext{ pNext_ } + , pipelinePropertiesIdentifier{ pipelinePropertiesIdentifier_ } { } @@ -75984,8 +78153,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineProtectedAccessFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 pipelineProtectedAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineProtectedAccess( pipelineProtectedAccess_ ) + : pNext{ pNext_ } + , pipelineProtectedAccess{ pipelineProtectedAccess_ } { } @@ -76083,8 +78252,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineRobustnessFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 pipelineRobustness_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineRobustness( pipelineRobustness_ ) + : pNext{ pNext_ } + , pipelineRobustness{ pipelineRobustness_ } { } @@ -76189,11 +78358,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT defaultRobustnessImages_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT::eDeviceDefault, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , defaultRobustnessStorageBuffers( defaultRobustnessStorageBuffers_ ) - , defaultRobustnessUniformBuffers( defaultRobustnessUniformBuffers_ ) - , defaultRobustnessVertexInputs( defaultRobustnessVertexInputs_ ) - , defaultRobustnessImages( defaultRobustnessImages_ ) + : pNext{ pNext_ } + , defaultRobustnessStorageBuffers{ defaultRobustnessStorageBuffers_ } + , defaultRobustnessUniformBuffers{ defaultRobustnessUniformBuffers_ } + , defaultRobustnessVertexInputs{ defaultRobustnessVertexInputs_ } + , defaultRobustnessImages{ defaultRobustnessImages_ } { } @@ -76290,8 +78459,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDevicePointClippingProperties( 
VULKAN_HPP_NAMESPACE::PointClippingBehavior pointClippingBehavior_ = VULKAN_HPP_NAMESPACE::PointClippingBehavior::eAllClipPlanes, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pointClippingBehavior( pointClippingBehavior_ ) + : pNext{ pNext_ } + , pointClippingBehavior{ pointClippingBehavior_ } { } @@ -76390,22 +78559,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 triangleFans_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeAccessBeyondStride_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , constantAlphaColorBlendFactors( constantAlphaColorBlendFactors_ ) - , events( events_ ) - , imageViewFormatReinterpretation( imageViewFormatReinterpretation_ ) - , imageViewFormatSwizzle( imageViewFormatSwizzle_ ) - , imageView2DOn3DImage( imageView2DOn3DImage_ ) - , multisampleArrayImage( multisampleArrayImage_ ) - , mutableComparisonSamplers( mutableComparisonSamplers_ ) - , pointPolygons( pointPolygons_ ) - , samplerMipLodBias( samplerMipLodBias_ ) - , separateStencilMaskRef( separateStencilMaskRef_ ) - , shaderSampleRateInterpolationFunctions( shaderSampleRateInterpolationFunctions_ ) - , tessellationIsolines( tessellationIsolines_ ) - , tessellationPointMode( tessellationPointMode_ ) - , triangleFans( triangleFans_ ) - , vertexAttributeAccessBeyondStride( vertexAttributeAccessBeyondStride_ ) + : pNext{ pNext_ } + , constantAlphaColorBlendFactors{ constantAlphaColorBlendFactors_ } + , events{ events_ } + , imageViewFormatReinterpretation{ imageViewFormatReinterpretation_ } + , imageViewFormatSwizzle{ imageViewFormatSwizzle_ } + , imageView2DOn3DImage{ imageView2DOn3DImage_ } + , multisampleArrayImage{ multisampleArrayImage_ } + , mutableComparisonSamplers{ mutableComparisonSamplers_ } + , pointPolygons{ pointPolygons_ } + , samplerMipLodBias{ samplerMipLodBias_ } + , separateStencilMaskRef{ separateStencilMaskRef_ } + , shaderSampleRateInterpolationFunctions{ shaderSampleRateInterpolationFunctions_ } + , tessellationIsolines{ tessellationIsolines_ } + , tessellationPointMode{ tessellationPointMode_ } + , triangleFans{ triangleFans_ } + , vertexAttributeAccessBeyondStride{ vertexAttributeAccessBeyondStride_ } { } @@ -76653,8 +78822,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePortabilitySubsetPropertiesKHR( uint32_t minVertexInputBindingStrideAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minVertexInputBindingStrideAlignment( minVertexInputBindingStrideAlignment_ ) + : pNext{ pNext_ } + , minVertexInputBindingStrideAlignment{ minVertexInputBindingStrideAlignment_ } { } @@ -76752,8 +78921,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePresentBarrierFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 presentBarrier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentBarrier( presentBarrier_ ) + : pNext{ pNext_ } + , presentBarrier{ presentBarrier_ } { } @@ -76848,8 +79017,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDevicePresentIdFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 presentId_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentId( presentId_ ) + : pNext{ pNext_ } + , presentId{ presentId_ } { } @@ -76944,8 +79113,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) 
VULKAN_HPP_CONSTEXPR PhysicalDevicePresentWaitFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 presentWait_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , presentWait( presentWait_ )
+      : pNext{ pNext_ }
+      , presentWait{ presentWait_ }
     {
     }

@@ -77042,9 +79211,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDevicePrimitiveTopologyListRestartFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 primitiveTopologyListRestart_      = {},
                                                                                 VULKAN_HPP_NAMESPACE::Bool32 primitiveTopologyPatchListRestart_ = {},
                                                                                 void *                       pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , primitiveTopologyListRestart( primitiveTopologyListRestart_ )
-      , primitiveTopologyPatchListRestart( primitiveTopologyPatchListRestart_ )
+      : pNext{ pNext_ }
+      , primitiveTopologyListRestart{ primitiveTopologyListRestart_ }
+      , primitiveTopologyPatchListRestart{ primitiveTopologyPatchListRestart_ }
     {
     }

@@ -77154,10 +79323,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                            VULKAN_HPP_NAMESPACE::Bool32 primitivesGeneratedQueryWithRasterizerDiscard_ = {},
                                                                            VULKAN_HPP_NAMESPACE::Bool32 primitivesGeneratedQueryWithNonZeroStreams_    = {},
                                                                            void *                       pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , primitivesGeneratedQuery( primitivesGeneratedQuery_ )
-      , primitivesGeneratedQueryWithRasterizerDiscard( primitivesGeneratedQueryWithRasterizerDiscard_ )
-      , primitivesGeneratedQueryWithNonZeroStreams( primitivesGeneratedQueryWithNonZeroStreams_ )
+      : pNext{ pNext_ }
+      , primitivesGeneratedQuery{ primitivesGeneratedQuery_ }
+      , primitivesGeneratedQueryWithRasterizerDiscard{ primitivesGeneratedQueryWithRasterizerDiscard_ }
+      , primitivesGeneratedQueryWithNonZeroStreams{ primitivesGeneratedQueryWithNonZeroStreams_ }
     {
     }

@@ -77277,8 +79446,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDevicePrivateDataFeatures( VULKAN_HPP_NAMESPACE::Bool32 privateData_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , privateData( privateData_ )
+      : pNext{ pNext_ }
+      , privateData{ privateData_ }
     {
     }

@@ -77366,334 +79535,6 @@ namespace VULKAN_HPP_NAMESPACE

   using PhysicalDevicePrivateDataFeaturesEXT = PhysicalDevicePrivateDataFeatures;

-  struct PhysicalDeviceSparseProperties
-  {
-    using NativeType = VkPhysicalDeviceSparseProperties;
-
-#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
-    VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties( VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape_            = {},
-                                                         VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape_ = {},
-                                                         VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape_            = {},
-                                                         VULKAN_HPP_NAMESPACE::Bool32 residencyAlignedMipSize_                  = {},
-                                                         VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict_               = {} ) VULKAN_HPP_NOEXCEPT
-      : residencyStandard2DBlockShape( residencyStandard2DBlockShape_ )
-      , residencyStandard2DMultisampleBlockShape( residencyStandard2DMultisampleBlockShape_ )
-      , residencyStandard3DBlockShape( residencyStandard3DBlockShape_ )
-      , residencyAlignedMipSize( residencyAlignedMipSize_ )
-      , residencyNonResidentStrict( residencyNonResidentStrict_ )
-    {
-    }
-
-    VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-
-    PhysicalDeviceSparseProperties( VkPhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT
-      : PhysicalDeviceSparseProperties( *reinterpret_cast<PhysicalDeviceSparseProperties const *>( &rhs ) )
-    {
-    }
-
-    PhysicalDeviceSparseProperties & operator=( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
-
-    PhysicalDeviceSparseProperties & operator=( VkPhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT
-    {
-      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties const *>( &rhs );
-      return *this;
-    }
-
-    operator VkPhysicalDeviceSparseProperties const &() const VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<const VkPhysicalDeviceSparseProperties *>( this );
-    }
-
-    operator VkPhysicalDeviceSparseProperties &() VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<VkPhysicalDeviceSparseProperties *>( this );
-    }
-
-#if defined( VULKAN_HPP_USE_REFLECT )
-#  if 14 <= VULKAN_HPP_CPP_VERSION
-    auto
-#  else
-    std::tuple<VULKAN_HPP_NAMESPACE::Bool32 const &,
-               VULKAN_HPP_NAMESPACE::Bool32 const &,
-               VULKAN_HPP_NAMESPACE::Bool32 const &,
-               VULKAN_HPP_NAMESPACE::Bool32 const &,
-               VULKAN_HPP_NAMESPACE::Bool32 const &>
-#  endif
-      reflect() const VULKAN_HPP_NOEXCEPT
-    {
-      return std::tie( residencyStandard2DBlockShape,
-                       residencyStandard2DMultisampleBlockShape,
-                       residencyStandard3DBlockShape,
-                       residencyAlignedMipSize,
-                       residencyNonResidentStrict );
-    }
-#endif
-
-#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    auto operator<=>( PhysicalDeviceSparseProperties const & ) const = default;
-#else
-    bool operator==( PhysicalDeviceSparseProperties const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-#  if defined( VULKAN_HPP_USE_REFLECT )
-      return this->reflect() == rhs.reflect();
-#  else
-      return ( residencyStandard2DBlockShape == rhs.residencyStandard2DBlockShape ) &&
-             ( residencyStandard2DMultisampleBlockShape == rhs.residencyStandard2DMultisampleBlockShape ) &&
-             ( residencyStandard3DBlockShape == rhs.residencyStandard3DBlockShape ) && ( residencyAlignedMipSize == rhs.residencyAlignedMipSize ) &&
-             ( residencyNonResidentStrict == rhs.residencyNonResidentStrict );
-#  endif
-    }
-
-    bool operator!=( PhysicalDeviceSparseProperties const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return !operator==( rhs );
-    }
-#endif
-
-  public:
-    VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape            = {};
-    VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape = {};
-    VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape            = {};
-    VULKAN_HPP_NAMESPACE::Bool32 residencyAlignedMipSize                  = {};
-    VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict               = {};
-  };
-
-  struct PhysicalDeviceProperties
-  {
-    using NativeType = VkPhysicalDeviceProperties;
-
-#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
-    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties( uint32_t apiVersion_    = {},
-                                                      uint32_t driverVersion_ = {},
-                                                      uint32_t vendorID_      = {},
-                                                      uint32_t deviceID_      = {},
-                                                      VULKAN_HPP_NAMESPACE::PhysicalDeviceType deviceType_ = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther,
-                                                      std::array<char, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE> const & deviceName_        = {},
-                                                      std::array<uint8_t, VK_UUID_SIZE> const &                  pipelineCacheUUID_ = {},
-                                                      VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits                 limits_            = {},
-                                                      VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties       sparseProperties_  = {} ) VULKAN_HPP_NOEXCEPT
-      : apiVersion( apiVersion_ )
-      , driverVersion( driverVersion_ )
-      , vendorID( vendorID_ )
-      , deviceID( deviceID_ )
-      , deviceType( deviceType_ )
-      , deviceName( deviceName_ )
-      , pipelineCacheUUID( pipelineCacheUUID_ )
-      , limits( limits_ )
-      , sparseProperties( sparseProperties_ )
-    {
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-
-    PhysicalDeviceProperties( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT
-      : PhysicalDeviceProperties( *reinterpret_cast<PhysicalDeviceProperties const *>( &rhs ) )
-    {
-    }
-
-#  if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
-    PhysicalDeviceProperties( uint32_t                                             apiVersion_,
-                              uint32_t                                             driverVersion_,
-                              uint32_t                                             vendorID_,
-                              uint32_t                                             deviceID_,
-                              VULKAN_HPP_NAMESPACE::PhysicalDeviceType             deviceType_,
-                              std::string const &                                  deviceName_,
-                              std::array<uint8_t, VK_UUID_SIZE> const &            pipelineCacheUUID_ = {},
-                              VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits           limits_            = {},
-                              VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties sparseProperties_  = {} )
-      : apiVersion( apiVersion_ )
-      , driverVersion( driverVersion_ )
-      , vendorID( vendorID_ )
-      , deviceID( deviceID_ )
-      , deviceType( deviceType_ )
-      , pipelineCacheUUID( pipelineCacheUUID_ )
-      , limits( limits_ )
-      , sparseProperties( sparseProperties_ )
-    {
-      VULKAN_HPP_ASSERT( deviceName_.size() < VK_MAX_PHYSICAL_DEVICE_NAME_SIZE );
-#    if defined( WIN32 )
-      strncpy_s( deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE, deviceName_.data(), deviceName_.size() );
-#    else
-      strncpy( deviceName, deviceName_.data(), std::min<size_t>( VK_MAX_PHYSICAL_DEVICE_NAME_SIZE, deviceName_.size() ) );
-#    endif
-    }
-#  endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-
-    PhysicalDeviceProperties & operator=( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
-
-    PhysicalDeviceProperties & operator=( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT
-    {
-      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties const *>( &rhs );
-      return *this;
-    }
-
-    operator VkPhysicalDeviceProperties const &() const VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<const VkPhysicalDeviceProperties *>( this );
-    }
-
-    operator VkPhysicalDeviceProperties &() VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<VkPhysicalDeviceProperties *>( this );
-    }
-
-#if defined( VULKAN_HPP_USE_REFLECT )
-#  if 14 <= VULKAN_HPP_CPP_VERSION
-    auto
-#  else
-    std::tuple<uint32_t const &,
-               uint32_t const &,
-               uint32_t const &,
-               uint32_t const &,
-               VULKAN_HPP_NAMESPACE::PhysicalDeviceType const &,
-               VULKAN_HPP_NAMESPACE::ArrayWrapper1D<char, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE> const &,
-               VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint8_t, VK_UUID_SIZE> const &,
-               VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits const &,
-               VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties const &>
-#  endif
-      reflect() const VULKAN_HPP_NOEXCEPT
-    {
-      return std::tie( apiVersion, driverVersion, vendorID, deviceID, deviceType, deviceName, pipelineCacheUUID, limits, sparseProperties );
-    }
-#endif
-
-#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    std::partial_ordering operator<=>( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      if ( auto cmp = apiVersion <=> rhs.apiVersion; cmp != 0 )
-        return cmp;
-      if ( auto cmp = driverVersion <=> rhs.driverVersion; cmp != 0 )
-        return cmp;
-      if ( auto cmp = vendorID <=> rhs.vendorID; cmp != 0 )
-        return cmp;
-      if ( auto cmp = deviceID <=> rhs.deviceID; cmp != 0 )
-        return cmp;
-      if ( auto cmp = deviceType <=> rhs.deviceType; cmp != 0 )
-        return cmp;
-      if ( auto cmp = strcmp( deviceName, rhs.deviceName ); cmp != 0 )
-        return ( cmp < 0 ) ? std::partial_ordering::less : std::partial_ordering::greater;
-      if ( auto cmp = pipelineCacheUUID <=> rhs.pipelineCacheUUID; cmp != 0 )
-        return cmp;
-      if ( auto cmp = limits <=> rhs.limits; cmp != 0 )
-        return cmp;
-      if ( auto cmp = sparseProperties <=> rhs.sparseProperties; cmp != 0 )
-        return cmp;
-
-      return std::partial_ordering::equivalent;
-    }
-#endif
-
-    bool operator==( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return ( apiVersion == rhs.apiVersion ) && ( driverVersion == rhs.driverVersion ) && ( vendorID == rhs.vendorID ) && ( deviceID == rhs.deviceID ) &&
-             ( deviceType == rhs.deviceType ) && ( strcmp( deviceName, rhs.deviceName ) == 0 ) && ( pipelineCacheUUID == rhs.pipelineCacheUUID ) &&
-             ( limits == rhs.limits ) && ( sparseProperties == rhs.sparseProperties );
-    }
-
-    bool operator!=( PhysicalDeviceProperties const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return !operator==( rhs );
-    }
-
-  public:
-    uint32_t                                                                     apiVersion        = {};
-    uint32_t                                                                     driverVersion     = {};
-    uint32_t                                                                     vendorID          = {};
-    uint32_t                                                                     deviceID          = {};
-    VULKAN_HPP_NAMESPACE::PhysicalDeviceType                                     deviceType        = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther;
-    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<char, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE> deviceName        = {};
-    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint8_t, VK_UUID_SIZE>                  pipelineCacheUUID = {};
-    VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits                                   limits            = {};
-    VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties                         sparseProperties  = {};
-  };
-
-  struct PhysicalDeviceProperties2
-  {
-    using NativeType = VkPhysicalDeviceProperties2;
-
-    static const bool                    allowDuplicate = false;
-    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceProperties2;
-
-#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
-    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties_ = {},
-                                                       void *                                         pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , properties( properties_ )
-    {
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-
-    PhysicalDeviceProperties2( VkPhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
-      : PhysicalDeviceProperties2( *reinterpret_cast<PhysicalDeviceProperties2 const *>( &rhs ) )
-    {
-    }
-
-    PhysicalDeviceProperties2 & operator=( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
-#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
-
-    PhysicalDeviceProperties2 & operator=( VkPhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
-    {
-      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 const *>( &rhs );
-      return *this;
-    }
-
-    operator VkPhysicalDeviceProperties2 const &() const VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<const VkPhysicalDeviceProperties2 *>( this );
-    }
-
-    operator VkPhysicalDeviceProperties2 &() VULKAN_HPP_NOEXCEPT
-    {
-      return *reinterpret_cast<VkPhysicalDeviceProperties2 *>( this );
-    }
-
-#if defined( VULKAN_HPP_USE_REFLECT )
-#  if 14 <= VULKAN_HPP_CPP_VERSION
-    auto
-#  else
-    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties const &>
-#  endif
-      reflect() const VULKAN_HPP_NOEXCEPT
-    {
-      return std::tie( sType, pNext, properties );
-    }
-#endif
-
-#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    auto operator<=>( PhysicalDeviceProperties2 const & ) const = default;
-#else
-    bool operator==( PhysicalDeviceProperties2 const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-#  if defined( VULKAN_HPP_USE_REFLECT )
-      return this->reflect() == rhs.reflect();
-#  else
-      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( properties == rhs.properties );
-#  endif
-    }
-
-    bool operator!=( PhysicalDeviceProperties2 const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return !operator==( rhs );
-    }
-#endif
-
-  public:
-    VULKAN_HPP_NAMESPACE::StructureType            sType      = StructureType::ePhysicalDeviceProperties2;
-    void *                                         pNext      = {};
-    VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties = {};
-  };
-
-  template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceProperties2>
-  {
-    using Type = PhysicalDeviceProperties2;
-  };
-
-  using PhysicalDeviceProperties2KHR = PhysicalDeviceProperties2;
-
   struct PhysicalDeviceProtectedMemoryFeatures
   {
     using NativeType = VkPhysicalDeviceProtectedMemoryFeatures;
@@ -77704,8 +79545,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryFeatures( VULKAN_HPP_NAMESPACE::Bool32 protectedMemory_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , protectedMemory( protectedMemory_ )
+      : pNext{ pNext_ }
+      , protectedMemory{ protectedMemory_ }
     {
     }

@@ -77801,8 +79642,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryProperties( VULKAN_HPP_NAMESPACE::Bool32 protectedNoFault_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , protectedNoFault( protectedNoFault_ )
+      : pNext{ pNext_ }
+      , protectedNoFault{ protectedNoFault_ }
     {
     }

@@ -77885,9 +79726,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceProvokingVertexFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 provokingVertexLast_                       = {},
                                                                    VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackPreservesProvokingVertex_ = {},
                                                                    void *                       pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , provokingVertexLast( provokingVertexLast_ )
-      , transformFeedbackPreservesProvokingVertex( transformFeedbackPreservesProvokingVertex_ )
+      : pNext{ pNext_ }
+      , provokingVertexLast{ provokingVertexLast_ }
+      , transformFeedbackPreservesProvokingVertex{ transformFeedbackPreservesProvokingVertex_ }
     {
     }

@@ -77994,9 +79835,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR PhysicalDeviceProvokingVertexPropertiesEXT( VULKAN_HPP_NAMESPACE::Bool32 provokingVertexModePerPipeline_                       = {},
                                                                      VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackPreservesTriangleFanProvokingVertex_ = {},
                                                                      void *                       pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , provokingVertexModePerPipeline( provokingVertexModePerPipeline_ )
-      , transformFeedbackPreservesTriangleFanProvokingVertex( transformFeedbackPreservesTriangleFanProvokingVertex_ )
+      : pNext{ pNext_ }
+      , provokingVertexModePerPipeline{ provokingVertexModePerPipeline_ }
+      , transformFeedbackPreservesTriangleFanProvokingVertex{ transformFeedbackPreservesTriangleFanProvokingVertex_ }
     {
     }

@@ -78079,8 +79920,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDevicePushDescriptorPropertiesKHR( uint32_t maxPushDescriptors_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , maxPushDescriptors( maxPushDescriptors_ )
+      : pNext{ pNext_ }
+      , maxPushDescriptors{ maxPushDescriptors_ }
     {
     }

@@ -78162,8 +80003,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR PhysicalDeviceRGBA10X6FormatsFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 formatRgba10x6WithoutYCbCrSampler_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , formatRgba10x6WithoutYCbCrSampler( formatRgba10x6WithoutYCbCrSampler_ )
+      : pNext{ pNext_ }
+      , formatRgba10x6WithoutYCbCrSampler{ formatRgba10x6WithoutYCbCrSampler_ }
     {
     }

@@ -78263,10 +80104,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                                                   VULKAN_HPP_NAMESPACE::Bool32
rasterizationOrderDepthAttachmentAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rasterizationOrderStencilAttachmentAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rasterizationOrderColorAttachmentAccess( rasterizationOrderColorAttachmentAccess_ ) - , rasterizationOrderDepthAttachmentAccess( rasterizationOrderDepthAttachmentAccess_ ) - , rasterizationOrderStencilAttachmentAccess( rasterizationOrderStencilAttachmentAccess_ ) + : pNext{ pNext_ } + , rasterizationOrderColorAttachmentAccess{ rasterizationOrderColorAttachmentAccess_ } + , rasterizationOrderDepthAttachmentAccess{ rasterizationOrderDepthAttachmentAccess_ } + , rasterizationOrderStencilAttachmentAccess{ rasterizationOrderStencilAttachmentAccess_ } { } @@ -78392,8 +80233,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRawAccessChainsFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 shaderRawAccessChains_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderRawAccessChains( shaderRawAccessChains_ ) + : pNext{ pNext_ } + , shaderRawAccessChains{ shaderRawAccessChains_ } { } @@ -78489,8 +80330,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRayQueryFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayQuery( rayQuery_ ) + : pNext{ pNext_ } + , rayQuery{ rayQuery_ } { } @@ -78586,8 +80427,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingInvocationReorderFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 rayTracingInvocationReorder_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingInvocationReorder( rayTracingInvocationReorder_ ) + : pNext{ pNext_ } + , rayTracingInvocationReorder{ rayTracingInvocationReorder_ } { } @@ -78688,8 +80529,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::RayTracingInvocationReorderModeNV rayTracingInvocationReorderReorderingHint_ = VULKAN_HPP_NAMESPACE::RayTracingInvocationReorderModeNV::eNone, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingInvocationReorderReorderingHint( rayTracingInvocationReorderReorderingHint_ ) + : pNext{ pNext_ } + , rayTracingInvocationReorderReorderingHint{ rayTracingInvocationReorderReorderingHint_ } { } @@ -78775,9 +80616,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingMaintenance1FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 rayTracingMaintenance1_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect2_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingMaintenance1( rayTracingMaintenance1_ ) - , rayTracingPipelineTraceRaysIndirect2( rayTracingPipelineTraceRaysIndirect2_ ) + : pNext{ pNext_ } + , rayTracingMaintenance1{ rayTracingMaintenance1_ } + , rayTracingPipelineTraceRaysIndirect2{ rayTracingPipelineTraceRaysIndirect2_ } { } @@ -78885,9 +80726,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingMotionBlurFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 rayTracingMotionBlur_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingMotionBlurPipelineTraceRaysIndirect_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingMotionBlur( rayTracingMotionBlur_ ) - , rayTracingMotionBlurPipelineTraceRaysIndirect( 
rayTracingMotionBlurPipelineTraceRaysIndirect_ ) + : pNext{ pNext_ } + , rayTracingMotionBlur{ rayTracingMotionBlur_ } + , rayTracingMotionBlurPipelineTraceRaysIndirect{ rayTracingMotionBlurPipelineTraceRaysIndirect_ } { } @@ -78997,12 +80838,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingPipeline( rayTracingPipeline_ ) - , rayTracingPipelineShaderGroupHandleCaptureReplay( rayTracingPipelineShaderGroupHandleCaptureReplay_ ) - , rayTracingPipelineShaderGroupHandleCaptureReplayMixed( rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ ) - , rayTracingPipelineTraceRaysIndirect( rayTracingPipelineTraceRaysIndirect_ ) - , rayTraversalPrimitiveCulling( rayTraversalPrimitiveCulling_ ) + : pNext{ pNext_ } + , rayTracingPipeline{ rayTracingPipeline_ } + , rayTracingPipelineShaderGroupHandleCaptureReplay{ rayTracingPipelineShaderGroupHandleCaptureReplay_ } + , rayTracingPipelineShaderGroupHandleCaptureReplayMixed{ rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ } + , rayTracingPipelineTraceRaysIndirect{ rayTracingPipelineTraceRaysIndirect_ } + , rayTraversalPrimitiveCulling{ rayTraversalPrimitiveCulling_ } { } @@ -79154,15 +80995,15 @@ namespace VULKAN_HPP_NAMESPACE uint32_t shaderGroupHandleAlignment_ = {}, uint32_t maxRayHitAttributeSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderGroupHandleSize( shaderGroupHandleSize_ ) - , maxRayRecursionDepth( maxRayRecursionDepth_ ) - , maxShaderGroupStride( maxShaderGroupStride_ ) - , shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ) - , shaderGroupHandleCaptureReplaySize( shaderGroupHandleCaptureReplaySize_ ) - , maxRayDispatchInvocationCount( maxRayDispatchInvocationCount_ ) - , shaderGroupHandleAlignment( shaderGroupHandleAlignment_ ) - , maxRayHitAttributeSize( maxRayHitAttributeSize_ ) + : pNext{ pNext_ } + , shaderGroupHandleSize{ shaderGroupHandleSize_ } + , maxRayRecursionDepth{ maxRayRecursionDepth_ } + , maxShaderGroupStride{ maxShaderGroupStride_ } + , shaderGroupBaseAlignment{ shaderGroupBaseAlignment_ } + , shaderGroupHandleCaptureReplaySize{ shaderGroupHandleCaptureReplaySize_ } + , maxRayDispatchInvocationCount{ maxRayDispatchInvocationCount_ } + , shaderGroupHandleAlignment{ shaderGroupHandleAlignment_ } + , maxRayHitAttributeSize{ maxRayHitAttributeSize_ } { } @@ -79274,8 +81115,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPositionFetchFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPositionFetch_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingPositionFetch( rayTracingPositionFetch_ ) + : pNext{ pNext_ } + , rayTracingPositionFetch{ rayTracingPositionFetch_ } { } @@ -79380,15 +81221,15 @@ namespace VULKAN_HPP_NAMESPACE uint64_t maxTriangleCount_ = {}, uint32_t maxDescriptorSetAccelerationStructures_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderGroupHandleSize( shaderGroupHandleSize_ ) - , maxRecursionDepth( maxRecursionDepth_ ) - , maxShaderGroupStride( maxShaderGroupStride_ ) - , shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ) - , maxGeometryCount( maxGeometryCount_ ) - , maxInstanceCount( maxInstanceCount_ ) - , maxTriangleCount( maxTriangleCount_ ) - , maxDescriptorSetAccelerationStructures( 
maxDescriptorSetAccelerationStructures_ ) + : pNext{ pNext_ } + , shaderGroupHandleSize{ shaderGroupHandleSize_ } + , maxRecursionDepth{ maxRecursionDepth_ } + , maxShaderGroupStride{ maxShaderGroupStride_ } + , shaderGroupBaseAlignment{ shaderGroupBaseAlignment_ } + , maxGeometryCount{ maxGeometryCount_ } + , maxInstanceCount{ maxInstanceCount_ } + , maxTriangleCount{ maxTriangleCount_ } + , maxDescriptorSetAccelerationStructures{ maxDescriptorSetAccelerationStructures_ } { } @@ -79499,8 +81340,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingValidationFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 rayTracingValidation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rayTracingValidation( rayTracingValidation_ ) + : pNext{ pNext_ } + , rayTracingValidation{ rayTracingValidation_ } { } @@ -79597,8 +81438,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRelaxedLineRasterizationFeaturesIMG( VULKAN_HPP_NAMESPACE::Bool32 relaxedLineRasterization_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , relaxedLineRasterization( relaxedLineRasterization_ ) + : pNext{ pNext_ } + , relaxedLineRasterization{ relaxedLineRasterization_ } { } @@ -79697,8 +81538,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRenderPassStripedFeaturesARM( VULKAN_HPP_NAMESPACE::Bool32 renderPassStriped_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , renderPassStriped( renderPassStriped_ ) + : pNext{ pNext_ } + , renderPassStriped{ renderPassStriped_ } { } @@ -79796,9 +81637,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceRenderPassStripedPropertiesARM( VULKAN_HPP_NAMESPACE::Extent2D renderPassStripeGranularity_ = {}, uint32_t maxRenderPassStripes_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , renderPassStripeGranularity( renderPassStripeGranularity_ ) - , maxRenderPassStripes( maxRenderPassStripes_ ) + : pNext{ pNext_ } + , renderPassStripeGranularity{ renderPassStripeGranularity_ } + , maxRenderPassStripes{ maxRenderPassStripes_ } { } @@ -79882,8 +81723,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceRepresentativeFragmentTestFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTest_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , representativeFragmentTest( representativeFragmentTest_ ) + : pNext{ pNext_ } + , representativeFragmentTest{ representativeFragmentTest_ } { } @@ -79984,10 +81825,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess2_ = {}, VULKAN_HPP_NAMESPACE::Bool32 nullDescriptor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , robustBufferAccess2( robustBufferAccess2_ ) - , robustImageAccess2( robustImageAccess2_ ) - , nullDescriptor( nullDescriptor_ ) + : pNext{ pNext_ } + , robustBufferAccess2{ robustBufferAccess2_ } + , robustImageAccess2{ robustImageAccess2_ } + , nullDescriptor{ nullDescriptor_ } { } @@ -80104,9 +81945,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceRobustness2PropertiesEXT( VULKAN_HPP_NAMESPACE::DeviceSize robustStorageBufferAccessSizeAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize robustUniformBufferAccessSizeAlignment_ = {}, void * pNext_ = 
nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , robustStorageBufferAccessSizeAlignment( robustStorageBufferAccessSizeAlignment_ ) - , robustUniformBufferAccessSizeAlignment( robustUniformBufferAccessSizeAlignment_ ) + : pNext{ pNext_ } + , robustStorageBufferAccessSizeAlignment{ robustStorageBufferAccessSizeAlignment_ } + , robustUniformBufferAccessSizeAlignment{ robustUniformBufferAccessSizeAlignment_ } { } @@ -80194,12 +82035,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t sampleLocationSubPixelBits_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variableSampleLocations_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampleLocationSampleCounts( sampleLocationSampleCounts_ ) - , maxSampleLocationGridSize( maxSampleLocationGridSize_ ) - , sampleLocationCoordinateRange( sampleLocationCoordinateRange_ ) - , sampleLocationSubPixelBits( sampleLocationSubPixelBits_ ) - , variableSampleLocations( variableSampleLocations_ ) + : pNext{ pNext_ } + , sampleLocationSampleCounts{ sampleLocationSampleCounts_ } + , maxSampleLocationGridSize{ maxSampleLocationGridSize_ } + , sampleLocationCoordinateRange{ sampleLocationCoordinateRange_ } + , sampleLocationSubPixelBits{ sampleLocationSubPixelBits_ } + , variableSampleLocations{ variableSampleLocations_ } { } @@ -80300,9 +82141,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerFilterMinmaxProperties( VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxImageComponentMapping_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , filterMinmaxSingleComponentFormats( filterMinmaxSingleComponentFormats_ ) - , filterMinmaxImageComponentMapping( filterMinmaxImageComponentMapping_ ) + : pNext{ pNext_ } + , filterMinmaxSingleComponentFormats{ filterMinmaxSingleComponentFormats_ } + , filterMinmaxImageComponentMapping{ filterMinmaxImageComponentMapping_ } { } @@ -80388,8 +82229,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerYcbcrConversionFeatures( VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , samplerYcbcrConversion( samplerYcbcrConversion_ ) + : pNext{ pNext_ } + , samplerYcbcrConversion{ samplerYcbcrConversion_ } { } @@ -80488,8 +82329,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceScalarBlockLayoutFeatures( VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , scalarBlockLayout( scalarBlockLayout_ ) + : pNext{ pNext_ } + , scalarBlockLayout{ scalarBlockLayout_ } { } @@ -80588,8 +82429,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSchedulingControlsFeaturesARM( VULKAN_HPP_NAMESPACE::Bool32 schedulingControls_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , schedulingControls( schedulingControls_ ) + : pNext{ pNext_ } + , schedulingControls{ schedulingControls_ } { } @@ -80687,8 +82528,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceSchedulingControlsPropertiesARM( VULKAN_HPP_NAMESPACE::PhysicalDeviceSchedulingControlsFlagsARM schedulingControlsFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , schedulingControlsFlags( schedulingControlsFlags_ ) + : pNext{ pNext_ } + , 
schedulingControlsFlags{ schedulingControlsFlags_ } { } @@ -80786,8 +82627,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSeparateDepthStencilLayoutsFeatures( VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , separateDepthStencilLayouts( separateDepthStencilLayouts_ ) + : pNext{ pNext_ } + , separateDepthStencilLayouts{ separateDepthStencilLayouts_ } { } @@ -80888,8 +82729,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16VectorAtomics_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderFloat16VectorAtomics( shaderFloat16VectorAtomics_ ) + : pNext{ pNext_ } + , shaderFloat16VectorAtomics{ shaderFloat16VectorAtomics_ } { } @@ -80999,19 +82840,19 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32AtomicMinMax_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32AtomicMinMax_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderBufferFloat16Atomics( shaderBufferFloat16Atomics_ ) - , shaderBufferFloat16AtomicAdd( shaderBufferFloat16AtomicAdd_ ) - , shaderBufferFloat16AtomicMinMax( shaderBufferFloat16AtomicMinMax_ ) - , shaderBufferFloat32AtomicMinMax( shaderBufferFloat32AtomicMinMax_ ) - , shaderBufferFloat64AtomicMinMax( shaderBufferFloat64AtomicMinMax_ ) - , shaderSharedFloat16Atomics( shaderSharedFloat16Atomics_ ) - , shaderSharedFloat16AtomicAdd( shaderSharedFloat16AtomicAdd_ ) - , shaderSharedFloat16AtomicMinMax( shaderSharedFloat16AtomicMinMax_ ) - , shaderSharedFloat32AtomicMinMax( shaderSharedFloat32AtomicMinMax_ ) - , shaderSharedFloat64AtomicMinMax( shaderSharedFloat64AtomicMinMax_ ) - , shaderImageFloat32AtomicMinMax( shaderImageFloat32AtomicMinMax_ ) - , sparseImageFloat32AtomicMinMax( sparseImageFloat32AtomicMinMax_ ) + : pNext{ pNext_ } + , shaderBufferFloat16Atomics{ shaderBufferFloat16Atomics_ } + , shaderBufferFloat16AtomicAdd{ shaderBufferFloat16AtomicAdd_ } + , shaderBufferFloat16AtomicMinMax{ shaderBufferFloat16AtomicMinMax_ } + , shaderBufferFloat32AtomicMinMax{ shaderBufferFloat32AtomicMinMax_ } + , shaderBufferFloat64AtomicMinMax{ shaderBufferFloat64AtomicMinMax_ } + , shaderSharedFloat16Atomics{ shaderSharedFloat16Atomics_ } + , shaderSharedFloat16AtomicAdd{ shaderSharedFloat16AtomicAdd_ } + , shaderSharedFloat16AtomicMinMax{ shaderSharedFloat16AtomicMinMax_ } + , shaderSharedFloat32AtomicMinMax{ shaderSharedFloat32AtomicMinMax_ } + , shaderSharedFloat64AtomicMinMax{ shaderSharedFloat64AtomicMinMax_ } + , shaderImageFloat32AtomicMinMax{ shaderImageFloat32AtomicMinMax_ } + , sparseImageFloat32AtomicMinMax{ sparseImageFloat32AtomicMinMax_ } { } @@ -81243,19 +83084,19 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32AtomicAdd_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderBufferFloat32Atomics( shaderBufferFloat32Atomics_ ) - , shaderBufferFloat32AtomicAdd( shaderBufferFloat32AtomicAdd_ ) - , shaderBufferFloat64Atomics( shaderBufferFloat64Atomics_ ) - , shaderBufferFloat64AtomicAdd( shaderBufferFloat64AtomicAdd_ ) - , shaderSharedFloat32Atomics( shaderSharedFloat32Atomics_ ) - , shaderSharedFloat32AtomicAdd( shaderSharedFloat32AtomicAdd_ ) - , 
shaderSharedFloat64Atomics( shaderSharedFloat64Atomics_ ) - , shaderSharedFloat64AtomicAdd( shaderSharedFloat64AtomicAdd_ ) - , shaderImageFloat32Atomics( shaderImageFloat32Atomics_ ) - , shaderImageFloat32AtomicAdd( shaderImageFloat32AtomicAdd_ ) - , sparseImageFloat32Atomics( sparseImageFloat32Atomics_ ) - , sparseImageFloat32AtomicAdd( sparseImageFloat32AtomicAdd_ ) + : pNext{ pNext_ } + , shaderBufferFloat32Atomics{ shaderBufferFloat32Atomics_ } + , shaderBufferFloat32AtomicAdd{ shaderBufferFloat32AtomicAdd_ } + , shaderBufferFloat64Atomics{ shaderBufferFloat64Atomics_ } + , shaderBufferFloat64AtomicAdd{ shaderBufferFloat64AtomicAdd_ } + , shaderSharedFloat32Atomics{ shaderSharedFloat32Atomics_ } + , shaderSharedFloat32AtomicAdd{ shaderSharedFloat32AtomicAdd_ } + , shaderSharedFloat64Atomics{ shaderSharedFloat64Atomics_ } + , shaderSharedFloat64AtomicAdd{ shaderSharedFloat64AtomicAdd_ } + , shaderImageFloat32Atomics{ shaderImageFloat32Atomics_ } + , shaderImageFloat32AtomicAdd{ shaderImageFloat32AtomicAdd_ } + , sparseImageFloat32Atomics{ sparseImageFloat32Atomics_ } + , sparseImageFloat32AtomicAdd{ sparseImageFloat32AtomicAdd_ } { } @@ -81473,9 +83314,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicInt64Features( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderBufferInt64Atomics( shaderBufferInt64Atomics_ ) - , shaderSharedInt64Atomics( shaderSharedInt64Atomics_ ) + : pNext{ pNext_ } + , shaderBufferInt64Atomics{ shaderBufferInt64Atomics_ } + , shaderSharedInt64Atomics{ shaderSharedInt64Atomics_ } { } @@ -81584,9 +83425,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderClockFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupClock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDeviceClock_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderSubgroupClock( shaderSubgroupClock_ ) - , shaderDeviceClock( shaderDeviceClock_ ) + : pNext{ pNext_ } + , shaderSubgroupClock{ shaderSubgroupClock_ } + , shaderDeviceClock{ shaderDeviceClock_ } { } @@ -81691,8 +83532,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCoreBuiltinsFeaturesARM( VULKAN_HPP_NAMESPACE::Bool32 shaderCoreBuiltins_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderCoreBuiltins( shaderCoreBuiltins_ ) + : pNext{ pNext_ } + , shaderCoreBuiltins{ shaderCoreBuiltins_ } { } @@ -81791,10 +83632,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t shaderCoreCount_ = {}, uint32_t shaderWarpsPerCore_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderCoreMask( shaderCoreMask_ ) - , shaderCoreCount( shaderCoreCount_ ) - , shaderWarpsPerCore( shaderWarpsPerCore_ ) + : pNext{ pNext_ } + , shaderCoreMask{ shaderCoreMask_ } + , shaderCoreCount{ shaderCoreCount_ } + , shaderWarpsPerCore{ shaderWarpsPerCore_ } { } @@ -81881,9 +83722,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCoreProperties2AMD( VULKAN_HPP_NAMESPACE::ShaderCorePropertiesFlagsAMD shaderCoreFeatures_ = {}, uint32_t activeComputeUnitCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderCoreFeatures( shaderCoreFeatures_ ) - , activeComputeUnitCount( activeComputeUnitCount_ ) + : pNext{ pNext_ } + , shaderCoreFeatures{ shaderCoreFeatures_ } + , 
activeComputeUnitCount{ activeComputeUnitCount_ } { } @@ -81980,21 +83821,21 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxVgprAllocation_ = {}, uint32_t vgprAllocationGranularity_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderEngineCount( shaderEngineCount_ ) - , shaderArraysPerEngineCount( shaderArraysPerEngineCount_ ) - , computeUnitsPerShaderArray( computeUnitsPerShaderArray_ ) - , simdPerComputeUnit( simdPerComputeUnit_ ) - , wavefrontsPerSimd( wavefrontsPerSimd_ ) - , wavefrontSize( wavefrontSize_ ) - , sgprsPerSimd( sgprsPerSimd_ ) - , minSgprAllocation( minSgprAllocation_ ) - , maxSgprAllocation( maxSgprAllocation_ ) - , sgprAllocationGranularity( sgprAllocationGranularity_ ) - , vgprsPerSimd( vgprsPerSimd_ ) - , minVgprAllocation( minVgprAllocation_ ) - , maxVgprAllocation( maxVgprAllocation_ ) - , vgprAllocationGranularity( vgprAllocationGranularity_ ) + : pNext{ pNext_ } + , shaderEngineCount{ shaderEngineCount_ } + , shaderArraysPerEngineCount{ shaderArraysPerEngineCount_ } + , computeUnitsPerShaderArray{ computeUnitsPerShaderArray_ } + , simdPerComputeUnit{ simdPerComputeUnit_ } + , wavefrontsPerSimd{ wavefrontsPerSimd_ } + , wavefrontSize{ wavefrontSize_ } + , sgprsPerSimd{ sgprsPerSimd_ } + , minSgprAllocation{ minSgprAllocation_ } + , maxSgprAllocation{ maxSgprAllocation_ } + , sgprAllocationGranularity{ sgprAllocationGranularity_ } + , vgprsPerSimd{ vgprsPerSimd_ } + , minVgprAllocation{ minVgprAllocation_ } + , maxVgprAllocation{ maxVgprAllocation_ } + , vgprAllocationGranularity{ vgprAllocationGranularity_ } { } @@ -82127,10 +83968,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t texelRate_ = {}, uint32_t fmaRate_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pixelRate( pixelRate_ ) - , texelRate( texelRate_ ) - , fmaRate( fmaRate_ ) + : pNext{ pNext_ } + , pixelRate{ pixelRate_ } + , texelRate{ texelRate_ } + , fmaRate{ fmaRate_ } { } @@ -82214,8 +84055,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDemoteToHelperInvocationFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderDemoteToHelperInvocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderDemoteToHelperInvocation( shaderDemoteToHelperInvocation_ ) + : pNext{ pNext_ } + , shaderDemoteToHelperInvocation{ shaderDemoteToHelperInvocation_ } { } @@ -82316,8 +84157,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDrawParametersFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderDrawParameters( shaderDrawParameters_ ) + : pNext{ pNext_ } + , shaderDrawParameters{ shaderDrawParameters_ } { } @@ -82416,8 +84257,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD( VULKAN_HPP_NAMESPACE::Bool32 shaderEarlyAndLateFragmentTests_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderEarlyAndLateFragmentTests( shaderEarlyAndLateFragmentTests_ ) + : pNext{ pNext_ } + , shaderEarlyAndLateFragmentTests{ shaderEarlyAndLateFragmentTests_ } { } @@ -82518,8 +84359,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderEnqueueFeaturesAMDX( VULKAN_HPP_NAMESPACE::Bool32 shaderEnqueue_ = {}, void * 
pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderEnqueue( shaderEnqueue_ ) + : pNext{ pNext_ } + , shaderEnqueue{ shaderEnqueue_ } { } @@ -82621,12 +84462,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxExecutionGraphShaderPayloadCount_ = {}, uint32_t executionGraphDispatchAddressAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxExecutionGraphDepth( maxExecutionGraphDepth_ ) - , maxExecutionGraphShaderOutputNodes( maxExecutionGraphShaderOutputNodes_ ) - , maxExecutionGraphShaderPayloadSize( maxExecutionGraphShaderPayloadSize_ ) - , maxExecutionGraphShaderPayloadCount( maxExecutionGraphShaderPayloadCount_ ) - , executionGraphDispatchAddressAlignment( executionGraphDispatchAddressAlignment_ ) + : pNext{ pNext_ } + , maxExecutionGraphDepth{ maxExecutionGraphDepth_ } + , maxExecutionGraphShaderOutputNodes{ maxExecutionGraphShaderOutputNodes_ } + , maxExecutionGraphShaderPayloadSize{ maxExecutionGraphShaderPayloadSize_ } + , maxExecutionGraphShaderPayloadCount{ maxExecutionGraphShaderPayloadCount_ } + , executionGraphDispatchAddressAlignment{ executionGraphDispatchAddressAlignment_ } { } @@ -82771,8 +84612,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderExpectAssumeFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderExpectAssume_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderExpectAssume( shaderExpectAssume_ ) + : pNext{ pNext_ } + , shaderExpectAssume{ shaderExpectAssume_ } { } @@ -82870,9 +84711,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderFloat16Int8Features( VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInt8_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderFloat16( shaderFloat16_ ) - , shaderInt8( shaderInt8_ ) + : pNext{ pNext_ } + , shaderFloat16{ shaderFloat16_ } + , shaderInt8{ shaderInt8_ } { } @@ -82978,8 +84819,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderFloatControls2FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderFloatControls2_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderFloatControls2( shaderFloatControls2_ ) + : pNext{ pNext_ } + , shaderFloatControls2{ shaderFloatControls2_ } { } @@ -83078,9 +84919,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageAtomicInt64FeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderImageInt64Atomics( shaderImageInt64Atomics_ ) - , sparseImageInt64Atomics( sparseImageInt64Atomics_ ) + : pNext{ pNext_ } + , shaderImageInt64Atomics{ shaderImageInt64Atomics_ } + , sparseImageInt64Atomics{ sparseImageInt64Atomics_ } { } @@ -83187,8 +85028,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageFootprintFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 imageFootprint_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageFootprint( imageFootprint_ ) + : pNext{ pNext_ } + , imageFootprint{ imageFootprint_ } { } @@ -83284,8 +85125,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderIntegerDotProductFeatures( 
VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerDotProduct_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderIntegerDotProduct( shaderIntegerDotProduct_ ) + : pNext{ pNext_ } + , shaderIntegerDotProduct{ shaderIntegerDotProduct_ } { } @@ -83415,38 +85256,37 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated_ = {}, VULKAN_HPP_NAMESPACE::Bool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , integerDotProduct8BitUnsignedAccelerated( integerDotProduct8BitUnsignedAccelerated_ ) - , integerDotProduct8BitSignedAccelerated( integerDotProduct8BitSignedAccelerated_ ) - , integerDotProduct8BitMixedSignednessAccelerated( integerDotProduct8BitMixedSignednessAccelerated_ ) - , integerDotProduct4x8BitPackedUnsignedAccelerated( integerDotProduct4x8BitPackedUnsignedAccelerated_ ) - , integerDotProduct4x8BitPackedSignedAccelerated( integerDotProduct4x8BitPackedSignedAccelerated_ ) - , integerDotProduct4x8BitPackedMixedSignednessAccelerated( integerDotProduct4x8BitPackedMixedSignednessAccelerated_ ) - , integerDotProduct16BitUnsignedAccelerated( integerDotProduct16BitUnsignedAccelerated_ ) - , integerDotProduct16BitSignedAccelerated( integerDotProduct16BitSignedAccelerated_ ) - , integerDotProduct16BitMixedSignednessAccelerated( integerDotProduct16BitMixedSignednessAccelerated_ ) - , integerDotProduct32BitUnsignedAccelerated( integerDotProduct32BitUnsignedAccelerated_ ) - , integerDotProduct32BitSignedAccelerated( integerDotProduct32BitSignedAccelerated_ ) - , integerDotProduct32BitMixedSignednessAccelerated( integerDotProduct32BitMixedSignednessAccelerated_ ) - , integerDotProduct64BitUnsignedAccelerated( integerDotProduct64BitUnsignedAccelerated_ ) - , integerDotProduct64BitSignedAccelerated( integerDotProduct64BitSignedAccelerated_ ) - , integerDotProduct64BitMixedSignednessAccelerated( integerDotProduct64BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitUnsignedAccelerated( integerDotProductAccumulatingSaturating8BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitSignedAccelerated( integerDotProductAccumulatingSaturating8BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated( integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated( integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated( - integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitUnsignedAccelerated( integerDotProductAccumulatingSaturating16BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitSignedAccelerated( integerDotProductAccumulatingSaturating16BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitUnsignedAccelerated( integerDotProductAccumulatingSaturating32BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitSignedAccelerated( 
integerDotProductAccumulatingSaturating32BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitUnsignedAccelerated( integerDotProductAccumulatingSaturating64BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitSignedAccelerated( integerDotProductAccumulatingSaturating64BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated_ ) + : pNext{ pNext_ } + , integerDotProduct8BitUnsignedAccelerated{ integerDotProduct8BitUnsignedAccelerated_ } + , integerDotProduct8BitSignedAccelerated{ integerDotProduct8BitSignedAccelerated_ } + , integerDotProduct8BitMixedSignednessAccelerated{ integerDotProduct8BitMixedSignednessAccelerated_ } + , integerDotProduct4x8BitPackedUnsignedAccelerated{ integerDotProduct4x8BitPackedUnsignedAccelerated_ } + , integerDotProduct4x8BitPackedSignedAccelerated{ integerDotProduct4x8BitPackedSignedAccelerated_ } + , integerDotProduct4x8BitPackedMixedSignednessAccelerated{ integerDotProduct4x8BitPackedMixedSignednessAccelerated_ } + , integerDotProduct16BitUnsignedAccelerated{ integerDotProduct16BitUnsignedAccelerated_ } + , integerDotProduct16BitSignedAccelerated{ integerDotProduct16BitSignedAccelerated_ } + , integerDotProduct16BitMixedSignednessAccelerated{ integerDotProduct16BitMixedSignednessAccelerated_ } + , integerDotProduct32BitUnsignedAccelerated{ integerDotProduct32BitUnsignedAccelerated_ } + , integerDotProduct32BitSignedAccelerated{ integerDotProduct32BitSignedAccelerated_ } + , integerDotProduct32BitMixedSignednessAccelerated{ integerDotProduct32BitMixedSignednessAccelerated_ } + , integerDotProduct64BitUnsignedAccelerated{ integerDotProduct64BitUnsignedAccelerated_ } + , integerDotProduct64BitSignedAccelerated{ integerDotProduct64BitSignedAccelerated_ } + , integerDotProduct64BitMixedSignednessAccelerated{ integerDotProduct64BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating8BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating8BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating8BitSignedAccelerated{ integerDotProductAccumulatingSaturating8BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating16BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating16BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating16BitSignedAccelerated{ integerDotProductAccumulatingSaturating16BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating32BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating32BitUnsignedAccelerated_ } + , 
integerDotProductAccumulatingSaturating32BitSignedAccelerated{ integerDotProductAccumulatingSaturating32BitSignedAccelerated_ }
+ , integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated_ }
+ , integerDotProductAccumulatingSaturating64BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating64BitUnsignedAccelerated_ }
+ , integerDotProductAccumulatingSaturating64BitSignedAccelerated{ integerDotProductAccumulatingSaturating64BitSignedAccelerated_ }
+ , integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated_ }
{ }
@@ -83658,8 +85498,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL( VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerFunctions2_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderIntegerFunctions2( shaderIntegerFunctions2_ )
+ : pNext{ pNext_ }
+ , shaderIntegerFunctions2{ shaderIntegerFunctions2_ }
{ }
@@ -83758,8 +85598,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderMaximalReconvergenceFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderMaximalReconvergence_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderMaximalReconvergence( shaderMaximalReconvergence_ )
+ : pNext{ pNext_ }
+ , shaderMaximalReconvergence{ shaderMaximalReconvergence_ }
{ }
@@ -83858,8 +85698,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderModuleIdentifierFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 shaderModuleIdentifier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderModuleIdentifier( shaderModuleIdentifier_ )
+ : pNext{ pNext_ }
+ , shaderModuleIdentifier{ shaderModuleIdentifier_ }
{ }
@@ -83958,8 +85798,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderModuleIdentifierPropertiesEXT( std::array<uint8_t, VK_UUID_SIZE> const & shaderModuleIdentifierAlgorithmUUID_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderModuleIdentifierAlgorithmUUID( shaderModuleIdentifierAlgorithmUUID_ )
+ : pNext{ pNext_ }
+ , shaderModuleIdentifierAlgorithmUUID{ shaderModuleIdentifierAlgorithmUUID_ }
{ }
@@ -84042,8 +85882,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderObjectFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 shaderObject_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderObject( shaderObject_ )
+ : pNext{ pNext_ }
+ , shaderObject{ shaderObject_ }
{ }
@@ -84140,9 +85980,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderObjectPropertiesEXT( std::array<uint8_t, VK_UUID_SIZE> const & shaderBinaryUUID_ = {}, uint32_t shaderBinaryVersion_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderBinaryUUID( shaderBinaryUUID_ )
- , shaderBinaryVersion( shaderBinaryVersion_ )
+ : pNext{ pNext_ }
+ , shaderBinaryUUID{ shaderBinaryUUID_ }
+ , shaderBinaryVersion{ shaderBinaryVersion_ }
{ }
@@ -84227,8 +86067,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderQuadControlFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderQuadControl_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderQuadControl( shaderQuadControl_ )
+ : pNext{ pNext_ }
+ , shaderQuadControl{ shaderQuadControl_ }
{ }
@@ -84315,6 +86155,208 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceShaderQuadControlFeaturesKHR; };
+ struct PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR
+ {
+ using NativeType = VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR;
+
+ static const bool allowDuplicate = false;
+ static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+ VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderRelaxedExtendedInstruction_ = {},
+ void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+ : pNext{ pNext_ }
+ , shaderRelaxedExtendedInstruction{ shaderRelaxedExtendedInstruction_ }
+ {
+ }
+
+ VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR( PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs )
+ VULKAN_HPP_NOEXCEPT = default;
+
+ PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR( VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+ : PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR(
+ *reinterpret_cast<PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const *>( &rhs ) )
+ {
+ }
+
+ PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR &
+ operator=( PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+ PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR &
+ operator=( VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+ {
+ *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const *>( &rhs );
+ return *this;
+ }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+ VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR &
+ setShaderRelaxedExtendedInstruction( VULKAN_HPP_NAMESPACE::Bool32 shaderRelaxedExtendedInstruction_ ) VULKAN_HPP_NOEXCEPT
+ {
+ shaderRelaxedExtendedInstruction = shaderRelaxedExtendedInstruction_;
+ return *this;
+ }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+ operator VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR *>( this );
+ }
+
+ operator VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR &() VULKAN_HPP_NOEXCEPT
+ {
+ return *reinterpret_cast<VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR *>( this );
+ }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+# if 14 <= VULKAN_HPP_CPP_VERSION
+ auto
+# else
+ std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+# endif
+ reflect() const VULKAN_HPP_NOEXCEPT
+ {
+ return std::tie( sType, pNext, shaderRelaxedExtendedInstruction );
+ }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+ auto operator<=>( PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & ) const = default;
+#else
+ bool operator==( PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+ {
+# if defined( VULKAN_HPP_USE_REFLECT )
+ return this->reflect() == rhs.reflect();
+# else
+ return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( shaderRelaxedExtendedInstruction == rhs.shaderRelaxedExtendedInstruction );
+# endif
+ }
+
+ bool operator!=( PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+ {
+ return !operator==( rhs );
+ }
+#endif
+
+ public:
+ VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR;
+ void * pNext = {};
+ VULKAN_HPP_NAMESPACE::Bool32 shaderRelaxedExtendedInstruction = {};
+ };
+
+ template <>
+ struct CppType<StructureType, StructureType::ePhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR>
+ {
+ using Type = PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR;
+ };
+
+ struct PhysicalDeviceShaderReplicatedCompositesFeaturesEXT
+ {
+ using NativeType = VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT;
+
+ static const bool allowDuplicate = false;
+ static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderReplicatedCompositesFeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+ VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderReplicatedCompositesFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 shaderReplicatedComposites_ = {},
+ void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+ : pNext{ pNext_ }
+ , shaderReplicatedComposites{ shaderReplicatedComposites_ }
+ {
+ }
+
+ VULKAN_HPP_CONSTEXPR
+ PhysicalDeviceShaderReplicatedCompositesFeaturesEXT( PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+ PhysicalDeviceShaderReplicatedCompositesFeaturesEXT( VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+ : PhysicalDeviceShaderReplicatedCompositesFeaturesEXT( *reinterpret_cast<PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const *>( &rhs ) )
+ {
+ }
+
+ PhysicalDeviceShaderReplicatedCompositesFeaturesEXT &
+ operator=( PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+ PhysicalDeviceShaderReplicatedCompositesFeaturesEXT & operator=( VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+ {
+ *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const *>( &rhs );
+ return *this;
+ }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+ VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderReplicatedCompositesFeaturesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+ {
+ pNext = pNext_;
+ return *this;
+ }
+
+ VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderReplicatedCompositesFeaturesEXT &
+ setShaderReplicatedComposites( VULKAN_HPP_NAMESPACE::Bool32 shaderReplicatedComposites_ ) VULKAN_HPP_NOEXCEPT
+ {
+ shaderReplicatedComposites = shaderReplicatedComposites_;
+ return *this;
+ }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+ operator VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT const &() const VULKAN_HPP_NOEXCEPT
+ {
+ return *reinterpret_cast<const VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT *>( this );
+ }
+
+ operator VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT &() VULKAN_HPP_NOEXCEPT
+ {
+ return *reinterpret_cast<VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT *>( this );
+ }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+# if 14 <= VULKAN_HPP_CPP_VERSION
+ auto
+# else
+ std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Bool32 const &>
+# endif
+ reflect() const VULKAN_HPP_NOEXCEPT
+ {
+ return std::tie( sType, pNext, shaderReplicatedComposites );
+ }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+ auto operator<=>( PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & ) const = default;
+#else
+ bool operator==( PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+ {
+# if defined( VULKAN_HPP_USE_REFLECT )
+ return this->reflect() == rhs.reflect();
+# else
+ return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( shaderReplicatedComposites == rhs.shaderReplicatedComposites );
+# endif
+ }
+
+ bool operator!=( PhysicalDeviceShaderReplicatedCompositesFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT
+ {
+ return !operator==( rhs );
+ }
+#endif
+
+ public:
+ VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderReplicatedCompositesFeaturesEXT;
+ void * pNext = {};
+ VULKAN_HPP_NAMESPACE::Bool32 shaderReplicatedComposites = {};
+ };
+
+ template <>
+ struct CppType<StructureType, StructureType::ePhysicalDeviceShaderReplicatedCompositesFeaturesEXT>
+ {
+ using Type = PhysicalDeviceShaderReplicatedCompositesFeaturesEXT;
+ };
+
 struct PhysicalDeviceShaderSMBuiltinsFeaturesNV { using NativeType = VkPhysicalDeviceShaderSMBuiltinsFeaturesNV;
@@ -84325,8 +86367,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 shaderSMBuiltins_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderSMBuiltins( shaderSMBuiltins_ )
+ : pNext{ pNext_ }
+ , shaderSMBuiltins{ shaderSMBuiltins_ }
{ }
@@ -84422,9 +86464,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsPropertiesNV( uint32_t shaderSMCount_ = {}, uint32_t shaderWarpsPerSM_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderSMCount( shaderSMCount_ )
- , shaderWarpsPerSM( shaderWarpsPerSM_ )
+ : pNext{ pNext_ }
+ , shaderSMCount{ shaderSMCount_ }
+ , shaderWarpsPerSM{ shaderWarpsPerSM_ }
{ }
@@ -84507,8 +86549,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupExtendedTypesFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderSubgroupExtendedTypes( shaderSubgroupExtendedTypes_ )
+ : pNext{ pNext_ }
+ , shaderSubgroupExtendedTypes{ shaderSubgroupExtendedTypes_ }
{ }
@@ -84610,9 +86652,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupRotateFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderSubgroupRotate( shaderSubgroupRotate_ )
- , shaderSubgroupRotateClustered( shaderSubgroupRotateClustered_ )
+ : pNext{ pNext_ }
+ , shaderSubgroupRotate{ shaderSubgroupRotate_ }
+ , shaderSubgroupRotateClustered{ shaderSubgroupRotateClustered_ }
{ }
@@ -84719,8 +86761,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupUniformControlFlow_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderSubgroupUniformControlFlow( shaderSubgroupUniformControlFlow_ )
+ : pNext{ pNext_ }
+ , shaderSubgroupUniformControlFlow{ shaderSubgroupUniformControlFlow_ }
{ }
@@ -84821,8 +86863,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderTerminateInvocationFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , shaderTerminateInvocation( shaderTerminateInvocation_ )
+ : pNext{ pNext_ }
+ , shaderTerminateInvocation{ shaderTerminateInvocation_ }
{ }
@@ -84924,10 +86966,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderTileImageDepthReadAccess_ =
{}, VULKAN_HPP_NAMESPACE::Bool32 shaderTileImageStencilReadAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderTileImageColorReadAccess( shaderTileImageColorReadAccess_ ) - , shaderTileImageDepthReadAccess( shaderTileImageDepthReadAccess_ ) - , shaderTileImageStencilReadAccess( shaderTileImageStencilReadAccess_ ) + : pNext{ pNext_ } + , shaderTileImageColorReadAccess{ shaderTileImageColorReadAccess_ } + , shaderTileImageDepthReadAccess{ shaderTileImageDepthReadAccess_ } + , shaderTileImageStencilReadAccess{ shaderTileImageStencilReadAccess_ } { } @@ -85048,10 +87090,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderTileImageReadSampleFromPixelRateInvocation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderTileImageReadFromHelperInvocation_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderTileImageCoherentReadAccelerated( shaderTileImageCoherentReadAccelerated_ ) - , shaderTileImageReadSampleFromPixelRateInvocation( shaderTileImageReadSampleFromPixelRateInvocation_ ) - , shaderTileImageReadFromHelperInvocation( shaderTileImageReadFromHelperInvocation_ ) + : pNext{ pNext_ } + , shaderTileImageCoherentReadAccelerated{ shaderTileImageCoherentReadAccelerated_ } + , shaderTileImageReadSampleFromPixelRateInvocation{ shaderTileImageReadSampleFromPixelRateInvocation_ } + , shaderTileImageReadFromHelperInvocation{ shaderTileImageReadFromHelperInvocation_ } { } @@ -85143,9 +87185,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceShadingRateImageFeaturesNV( VULKAN_HPP_NAMESPACE::Bool32 shadingRateImage_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shadingRateCoarseSampleOrder_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shadingRateImage( shadingRateImage_ ) - , shadingRateCoarseSampleOrder( shadingRateCoarseSampleOrder_ ) + : pNext{ pNext_ } + , shadingRateImage{ shadingRateImage_ } + , shadingRateCoarseSampleOrder{ shadingRateCoarseSampleOrder_ } { } @@ -85252,10 +87294,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t shadingRatePaletteSize_ = {}, uint32_t shadingRateMaxCoarseSamples_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shadingRateTexelSize( shadingRateTexelSize_ ) - , shadingRatePaletteSize( shadingRatePaletteSize_ ) - , shadingRateMaxCoarseSamples( shadingRateMaxCoarseSamples_ ) + : pNext{ pNext_ } + , shadingRateTexelSize{ shadingRateTexelSize_ } + , shadingRatePaletteSize{ shadingRatePaletteSize_ } + , shadingRateMaxCoarseSamples{ shadingRateMaxCoarseSamples_ } { } @@ -85345,12 +87387,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ImageTiling tiling_ = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , type( type_ ) - , samples( samples_ ) - , usage( usage_ ) - , tiling( tiling_ ) + : pNext{ pNext_ } + , format{ format_ } + , type{ type_ } + , samples{ samples_ } + , usage{ usage_ } + , tiling{ tiling_ } { } @@ -85486,11 +87528,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags supportedOperations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 quadOperationsInAllStages_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subgroupSize( subgroupSize_ ) - , supportedStages( supportedStages_ ) - , supportedOperations( supportedOperations_ ) - , quadOperationsInAllStages( quadOperationsInAllStages_ ) + : pNext{ pNext_ } + , subgroupSize{ 
subgroupSize_ } + , supportedStages{ supportedStages_ } + , supportedOperations{ supportedOperations_ } + , quadOperationsInAllStages{ quadOperationsInAllStages_ } { } @@ -85582,9 +87624,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupSizeControlFeatures( VULKAN_HPP_NAMESPACE::Bool32 subgroupSizeControl_ = {}, VULKAN_HPP_NAMESPACE::Bool32 computeFullSubgroups_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subgroupSizeControl( subgroupSizeControl_ ) - , computeFullSubgroups( computeFullSubgroups_ ) + : pNext{ pNext_ } + , subgroupSizeControl{ subgroupSizeControl_ } + , computeFullSubgroups{ computeFullSubgroups_ } { } @@ -85695,11 +87737,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxComputeWorkgroupSubgroups_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags requiredSubgroupSizeStages_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minSubgroupSize( minSubgroupSize_ ) - , maxSubgroupSize( maxSubgroupSize_ ) - , maxComputeWorkgroupSubgroups( maxComputeWorkgroupSubgroups_ ) - , requiredSubgroupSizeStages( requiredSubgroupSizeStages_ ) + : pNext{ pNext_ } + , minSubgroupSize{ minSubgroupSize_ } + , maxSubgroupSize{ maxSubgroupSize_ } + , maxComputeWorkgroupSubgroups{ maxComputeWorkgroupSubgroups_ } + , requiredSubgroupSizeStages{ requiredSubgroupSizeStages_ } { } @@ -85792,8 +87834,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSubpassMergeFeedbackFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 subpassMergeFeedback_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subpassMergeFeedback( subpassMergeFeedback_ ) + : pNext{ pNext_ } + , subpassMergeFeedback{ subpassMergeFeedback_ } { } @@ -85891,8 +87933,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSubpassShadingFeaturesHUAWEI( VULKAN_HPP_NAMESPACE::Bool32 subpassShading_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subpassShading( subpassShading_ ) + : pNext{ pNext_ } + , subpassShading{ subpassShading_ } { } @@ -85988,8 +88030,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSubpassShadingPropertiesHUAWEI( uint32_t maxSubpassShadingWorkgroupSizeAspectRatio_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxSubpassShadingWorkgroupSizeAspectRatio( maxSubpassShadingWorkgroupSizeAspectRatio_ ) + : pNext{ pNext_ } + , maxSubpassShadingWorkgroupSizeAspectRatio{ maxSubpassShadingWorkgroupSizeAspectRatio_ } { } @@ -86070,8 +88112,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSurfaceInfo2KHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , surface( surface_ ) + : pNext{ pNext_ } + , surface{ surface_ } { } @@ -86167,8 +88209,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSwapchainMaintenance1FeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 swapchainMaintenance1_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainMaintenance1( swapchainMaintenance1_ ) + : pNext{ pNext_ } + , swapchainMaintenance1{ swapchainMaintenance1_ } { } @@ -86266,8 +88308,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( 
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceSynchronization2Features( VULKAN_HPP_NAMESPACE::Bool32 synchronization2_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , synchronization2( synchronization2_ ) + : pNext{ pNext_ } + , synchronization2{ synchronization2_ } { } @@ -86365,8 +88407,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceTexelBufferAlignmentFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 texelBufferAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , texelBufferAlignment( texelBufferAlignment_ ) + : pNext{ pNext_ } + , texelBufferAlignment{ texelBufferAlignment_ } { } @@ -86467,11 +88509,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize uniformTexelBufferOffsetAlignmentBytes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformTexelBufferOffsetSingleTexelAlignment_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , storageTexelBufferOffsetAlignmentBytes( storageTexelBufferOffsetAlignmentBytes_ ) - , storageTexelBufferOffsetSingleTexelAlignment( storageTexelBufferOffsetSingleTexelAlignment_ ) - , uniformTexelBufferOffsetAlignmentBytes( uniformTexelBufferOffsetAlignmentBytes_ ) - , uniformTexelBufferOffsetSingleTexelAlignment( uniformTexelBufferOffsetSingleTexelAlignment_ ) + : pNext{ pNext_ } + , storageTexelBufferOffsetAlignmentBytes{ storageTexelBufferOffsetAlignmentBytes_ } + , storageTexelBufferOffsetSingleTexelAlignment{ storageTexelBufferOffsetSingleTexelAlignment_ } + , uniformTexelBufferOffsetAlignmentBytes{ uniformTexelBufferOffsetAlignmentBytes_ } + , uniformTexelBufferOffsetSingleTexelAlignment{ uniformTexelBufferOffsetSingleTexelAlignment_ } { } @@ -86571,8 +88613,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceTextureCompressionASTCHDRFeatures( VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_HDR_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , textureCompressionASTC_HDR( textureCompressionASTC_HDR_ ) + : pNext{ pNext_ } + , textureCompressionASTC_HDR{ textureCompressionASTC_HDR_ } { } @@ -86672,8 +88714,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceTilePropertiesFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 tileProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , tileProperties( tileProperties_ ) + : pNext{ pNext_ } + , tileProperties{ tileProperties_ } { } @@ -86769,8 +88811,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreFeatures( VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , timelineSemaphore( timelineSemaphore_ ) + : pNext{ pNext_ } + , timelineSemaphore{ timelineSemaphore_ } { } @@ -86869,8 +88911,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreProperties( uint64_t maxTimelineSemaphoreValueDifference_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxTimelineSemaphoreValueDifference( maxTimelineSemaphoreValueDifference_ ) + : pNext{ pNext_ } + , maxTimelineSemaphoreValueDifference{ maxTimelineSemaphoreValueDifference_ } { } @@ -86958,12 +89000,12 @@ namespace VULKAN_HPP_NAMESPACE 
std::array<char, VK_MAX_DESCRIPTION_SIZE> const & description_ = {}, std::array<char, VK_MAX_EXTENSION_NAME_SIZE> const & layer_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , name( name_ )
- , version( version_ )
- , purposes( purposes_ )
- , description( description_ )
- , layer( layer_ )
+ : pNext{ pNext_ }
+ , name{ name_ }
+ , version{ version_ }
+ , purposes{ purposes_ }
+ , description{ description_ }
+ , layer{ layer_ }
{ }
@@ -86974,45 +89016,6 @@ namespace VULKAN_HPP_NAMESPACE { }
-# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
- PhysicalDeviceToolProperties( std::string const & name_,
- std::string const & version_ = {},
- VULKAN_HPP_NAMESPACE::ToolPurposeFlags purposes_ = {},
- std::string const & description_ = {},
- std::string const & layer_ = {},
- void * pNext_ = nullptr )
- : pNext( pNext_ ), purposes( purposes_ )
- {
- VULKAN_HPP_ASSERT( name_.size() < VK_MAX_EXTENSION_NAME_SIZE );
-# if defined( WIN32 )
- strncpy_s( name, VK_MAX_EXTENSION_NAME_SIZE, name_.data(), name_.size() );
-# else
- strncpy( name, name_.data(), std::min<size_t>( VK_MAX_EXTENSION_NAME_SIZE, name_.size() ) );
-# endif
-
- VULKAN_HPP_ASSERT( version_.size() < VK_MAX_EXTENSION_NAME_SIZE );
-# if defined( WIN32 )
- strncpy_s( version, VK_MAX_EXTENSION_NAME_SIZE, version_.data(), version_.size() );
-# else
- strncpy( version, version_.data(), std::min<size_t>( VK_MAX_EXTENSION_NAME_SIZE, version_.size() ) );
-# endif
-
- VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE );
-# if defined( WIN32 )
- strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() );
-# else
- strncpy( description, description_.data(), std::min<size_t>( VK_MAX_DESCRIPTION_SIZE, description_.size() ) );
-# endif
-
- VULKAN_HPP_ASSERT( layer_.size() < VK_MAX_EXTENSION_NAME_SIZE );
-# if defined( WIN32 )
- strncpy_s( layer, VK_MAX_EXTENSION_NAME_SIZE, layer_.data(), layer_.size() );
-# else
- strncpy( layer, layer_.data(), std::min<size_t>( VK_MAX_EXTENSION_NAME_SIZE, layer_.size() ) );
-# endif
- }
-# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-
 PhysicalDeviceToolProperties & operator=( PhysicalDeviceToolProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
@@ -87112,9 +89115,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceTransformFeedbackFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 transformFeedback_ = {}, VULKAN_HPP_NAMESPACE::Bool32 geometryStreams_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , transformFeedback( transformFeedback_ )
- , geometryStreams( geometryStreams_ )
+ : pNext{ pNext_ }
+ , transformFeedback{ transformFeedback_ }
+ , geometryStreams{ geometryStreams_ }
{ }
@@ -87227,17 +89230,17 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackRasterizationStreamSelect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackDraw_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
- : pNext( pNext_ )
- , maxTransformFeedbackStreams( maxTransformFeedbackStreams_ )
- , maxTransformFeedbackBuffers( maxTransformFeedbackBuffers_ )
- , maxTransformFeedbackBufferSize( maxTransformFeedbackBufferSize_ )
- , maxTransformFeedbackStreamDataSize( maxTransformFeedbackStreamDataSize_ )
- , maxTransformFeedbackBufferDataSize( maxTransformFeedbackBufferDataSize_ )
- , maxTransformFeedbackBufferDataStride( maxTransformFeedbackBufferDataStride_ )
- , transformFeedbackQueries( transformFeedbackQueries_ )
- , transformFeedbackStreamsLinesTriangles( transformFeedbackStreamsLinesTriangles_ )
- , transformFeedbackRasterizationStreamSelect(
transformFeedbackRasterizationStreamSelect_ ) - , transformFeedbackDraw( transformFeedbackDraw_ ) + : pNext{ pNext_ } + , maxTransformFeedbackStreams{ maxTransformFeedbackStreams_ } + , maxTransformFeedbackBuffers{ maxTransformFeedbackBuffers_ } + , maxTransformFeedbackBufferSize{ maxTransformFeedbackBufferSize_ } + , maxTransformFeedbackStreamDataSize{ maxTransformFeedbackStreamDataSize_ } + , maxTransformFeedbackBufferDataSize{ maxTransformFeedbackBufferDataSize_ } + , maxTransformFeedbackBufferDataStride{ maxTransformFeedbackBufferDataStride_ } + , transformFeedbackQueries{ transformFeedbackQueries_ } + , transformFeedbackStreamsLinesTriangles{ transformFeedbackStreamsLinesTriangles_ } + , transformFeedbackRasterizationStreamSelect{ transformFeedbackRasterizationStreamSelect_ } + , transformFeedbackDraw{ transformFeedbackDraw_ } { } @@ -87358,8 +89361,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceUniformBufferStandardLayoutFeatures( VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , uniformBufferStandardLayout( uniformBufferStandardLayout_ ) + : pNext{ pNext_ } + , uniformBufferStandardLayout{ uniformBufferStandardLayout_ } { } @@ -87461,9 +89464,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceVariablePointersFeatures( VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variablePointers_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , variablePointersStorageBuffer( variablePointersStorageBuffer_ ) - , variablePointers( variablePointers_ ) + : pNext{ pNext_ } + , variablePointersStorageBuffer{ variablePointersStorageBuffer_ } + , variablePointers{ variablePointers_ } { } @@ -87573,9 +89576,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , vertexAttributeInstanceRateDivisor( vertexAttributeInstanceRateDivisor_ ) - , vertexAttributeInstanceRateZeroDivisor( vertexAttributeInstanceRateZeroDivisor_ ) + : pNext{ pNext_ } + , vertexAttributeInstanceRateDivisor{ vertexAttributeInstanceRateDivisor_ } + , vertexAttributeInstanceRateZeroDivisor{ vertexAttributeInstanceRateZeroDivisor_ } { } @@ -87683,8 +89686,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorPropertiesEXT( uint32_t maxVertexAttribDivisor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxVertexAttribDivisor( maxVertexAttribDivisor_ ) + : pNext{ pNext_ } + , maxVertexAttribDivisor{ maxVertexAttribDivisor_ } { } @@ -87769,9 +89772,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorPropertiesKHR( uint32_t maxVertexAttribDivisor_ = {}, VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxVertexAttribDivisor( maxVertexAttribDivisor_ ) - , supportsNonZeroFirstInstance( supportsNonZeroFirstInstance_ ) + : pNext{ pNext_ } + , maxVertexAttribDivisor{ maxVertexAttribDivisor_ } + , supportsNonZeroFirstInstance{ supportsNonZeroFirstInstance_ } { } @@ -87857,8 +89860,8 @@ namespace 
VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexInputDynamicStateFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 vertexInputDynamicState_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , vertexInputDynamicState( vertexInputDynamicState_ ) + : pNext{ pNext_ } + , vertexInputDynamicState{ vertexInputDynamicState_ } { } @@ -87960,11 +89963,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VideoComponentBitDepthFlagsKHR lumaBitDepth_ = {}, VULKAN_HPP_NAMESPACE::VideoComponentBitDepthFlagsKHR chromaBitDepth_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoCodecOperation( videoCodecOperation_ ) - , chromaSubsampling( chromaSubsampling_ ) - , lumaBitDepth( lumaBitDepth_ ) - , chromaBitDepth( chromaBitDepth_ ) + : pNext{ pNext_ } + , videoCodecOperation{ videoCodecOperation_ } + , chromaSubsampling{ chromaSubsampling_ } + , lumaBitDepth{ lumaBitDepth_ } + , chromaBitDepth{ chromaBitDepth_ } { } @@ -88089,9 +90092,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PhysicalDeviceVideoEncodeQualityLevelInfoKHR( const VULKAN_HPP_NAMESPACE::VideoProfileInfoKHR * pVideoProfile_ = {}, uint32_t qualityLevel_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pVideoProfile( pVideoProfile_ ) - , qualityLevel( qualityLevel_ ) + : pNext{ pNext_ } + , pVideoProfile{ pVideoProfile_ } + , qualityLevel{ qualityLevel_ } { } @@ -88195,8 +90198,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceVideoFormatInfoKHR( VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageUsage( imageUsage_ ) + : pNext{ pNext_ } + , imageUsage{ imageUsage_ } { } @@ -88292,8 +90295,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceVideoMaintenance1FeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 videoMaintenance1_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoMaintenance1( videoMaintenance1_ ) + : pNext{ pNext_ } + , videoMaintenance1{ videoMaintenance1_ } { } @@ -88401,19 +90404,19 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , storageBuffer16BitAccess( storageBuffer16BitAccess_ ) - , uniformAndStorageBuffer16BitAccess( uniformAndStorageBuffer16BitAccess_ ) - , storagePushConstant16( storagePushConstant16_ ) - , storageInputOutput16( storageInputOutput16_ ) - , multiview( multiview_ ) - , multiviewGeometryShader( multiviewGeometryShader_ ) - , multiviewTessellationShader( multiviewTessellationShader_ ) - , variablePointersStorageBuffer( variablePointersStorageBuffer_ ) - , variablePointers( variablePointers_ ) - , protectedMemory( protectedMemory_ ) - , samplerYcbcrConversion( samplerYcbcrConversion_ ) - , shaderDrawParameters( shaderDrawParameters_ ) + : pNext{ pNext_ } + , storageBuffer16BitAccess{ storageBuffer16BitAccess_ } + , uniformAndStorageBuffer16BitAccess{ uniformAndStorageBuffer16BitAccess_ } + , storagePushConstant16{ storagePushConstant16_ } + , storageInputOutput16{ storageInputOutput16_ } + , multiview{ multiview_ } + , multiviewGeometryShader{ multiviewGeometryShader_ } + , multiviewTessellationShader{ multiviewTessellationShader_ } + , 
variablePointersStorageBuffer{ variablePointersStorageBuffer_ } + , variablePointers{ variablePointers_ } + , protectedMemory{ protectedMemory_ } + , samplerYcbcrConversion{ samplerYcbcrConversion_ } + , shaderDrawParameters{ shaderDrawParameters_ } { } @@ -88639,22 +90642,22 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxPerSetDescriptors_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , deviceUUID( deviceUUID_ ) - , driverUUID( driverUUID_ ) - , deviceLUID( deviceLUID_ ) - , deviceNodeMask( deviceNodeMask_ ) - , deviceLUIDValid( deviceLUIDValid_ ) - , subgroupSize( subgroupSize_ ) - , subgroupSupportedStages( subgroupSupportedStages_ ) - , subgroupSupportedOperations( subgroupSupportedOperations_ ) - , subgroupQuadOperationsInAllStages( subgroupQuadOperationsInAllStages_ ) - , pointClippingBehavior( pointClippingBehavior_ ) - , maxMultiviewViewCount( maxMultiviewViewCount_ ) - , maxMultiviewInstanceIndex( maxMultiviewInstanceIndex_ ) - , protectedNoFault( protectedNoFault_ ) - , maxPerSetDescriptors( maxPerSetDescriptors_ ) - , maxMemoryAllocationSize( maxMemoryAllocationSize_ ) + : pNext{ pNext_ } + , deviceUUID{ deviceUUID_ } + , driverUUID{ driverUUID_ } + , deviceLUID{ deviceLUID_ } + , deviceNodeMask{ deviceNodeMask_ } + , deviceLUIDValid{ deviceLUIDValid_ } + , subgroupSize{ subgroupSize_ } + , subgroupSupportedStages{ subgroupSupportedStages_ } + , subgroupSupportedOperations{ subgroupSupportedOperations_ } + , subgroupQuadOperationsInAllStages{ subgroupQuadOperationsInAllStages_ } + , pointClippingBehavior{ pointClippingBehavior_ } + , maxMultiviewViewCount{ maxMultiviewViewCount_ } + , maxMultiviewInstanceIndex{ maxMultiviewInstanceIndex_ } + , protectedNoFault{ protectedNoFault_ } + , maxPerSetDescriptors{ maxPerSetDescriptors_ } + , maxMemoryAllocationSize{ maxMemoryAllocationSize_ } { } @@ -88835,54 +90838,54 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderOutputLayer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 subgroupBroadcastDynamicId_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , samplerMirrorClampToEdge( samplerMirrorClampToEdge_ ) - , drawIndirectCount( drawIndirectCount_ ) - , storageBuffer8BitAccess( storageBuffer8BitAccess_ ) - , uniformAndStorageBuffer8BitAccess( uniformAndStorageBuffer8BitAccess_ ) - , storagePushConstant8( storagePushConstant8_ ) - , shaderBufferInt64Atomics( shaderBufferInt64Atomics_ ) - , shaderSharedInt64Atomics( shaderSharedInt64Atomics_ ) - , shaderFloat16( shaderFloat16_ ) - , shaderInt8( shaderInt8_ ) - , descriptorIndexing( descriptorIndexing_ ) - , shaderInputAttachmentArrayDynamicIndexing( shaderInputAttachmentArrayDynamicIndexing_ ) - , shaderUniformTexelBufferArrayDynamicIndexing( shaderUniformTexelBufferArrayDynamicIndexing_ ) - , shaderStorageTexelBufferArrayDynamicIndexing( shaderStorageTexelBufferArrayDynamicIndexing_ ) - , shaderUniformBufferArrayNonUniformIndexing( shaderUniformBufferArrayNonUniformIndexing_ ) - , shaderSampledImageArrayNonUniformIndexing( shaderSampledImageArrayNonUniformIndexing_ ) - , shaderStorageBufferArrayNonUniformIndexing( shaderStorageBufferArrayNonUniformIndexing_ ) - , shaderStorageImageArrayNonUniformIndexing( shaderStorageImageArrayNonUniformIndexing_ ) - , shaderInputAttachmentArrayNonUniformIndexing( shaderInputAttachmentArrayNonUniformIndexing_ ) - , shaderUniformTexelBufferArrayNonUniformIndexing( shaderUniformTexelBufferArrayNonUniformIndexing_ ) - , 
shaderStorageTexelBufferArrayNonUniformIndexing( shaderStorageTexelBufferArrayNonUniformIndexing_ ) - , descriptorBindingUniformBufferUpdateAfterBind( descriptorBindingUniformBufferUpdateAfterBind_ ) - , descriptorBindingSampledImageUpdateAfterBind( descriptorBindingSampledImageUpdateAfterBind_ ) - , descriptorBindingStorageImageUpdateAfterBind( descriptorBindingStorageImageUpdateAfterBind_ ) - , descriptorBindingStorageBufferUpdateAfterBind( descriptorBindingStorageBufferUpdateAfterBind_ ) - , descriptorBindingUniformTexelBufferUpdateAfterBind( descriptorBindingUniformTexelBufferUpdateAfterBind_ ) - , descriptorBindingStorageTexelBufferUpdateAfterBind( descriptorBindingStorageTexelBufferUpdateAfterBind_ ) - , descriptorBindingUpdateUnusedWhilePending( descriptorBindingUpdateUnusedWhilePending_ ) - , descriptorBindingPartiallyBound( descriptorBindingPartiallyBound_ ) - , descriptorBindingVariableDescriptorCount( descriptorBindingVariableDescriptorCount_ ) - , runtimeDescriptorArray( runtimeDescriptorArray_ ) - , samplerFilterMinmax( samplerFilterMinmax_ ) - , scalarBlockLayout( scalarBlockLayout_ ) - , imagelessFramebuffer( imagelessFramebuffer_ ) - , uniformBufferStandardLayout( uniformBufferStandardLayout_ ) - , shaderSubgroupExtendedTypes( shaderSubgroupExtendedTypes_ ) - , separateDepthStencilLayouts( separateDepthStencilLayouts_ ) - , hostQueryReset( hostQueryReset_ ) - , timelineSemaphore( timelineSemaphore_ ) - , bufferDeviceAddress( bufferDeviceAddress_ ) - , bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ) - , bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ) - , vulkanMemoryModel( vulkanMemoryModel_ ) - , vulkanMemoryModelDeviceScope( vulkanMemoryModelDeviceScope_ ) - , vulkanMemoryModelAvailabilityVisibilityChains( vulkanMemoryModelAvailabilityVisibilityChains_ ) - , shaderOutputViewportIndex( shaderOutputViewportIndex_ ) - , shaderOutputLayer( shaderOutputLayer_ ) - , subgroupBroadcastDynamicId( subgroupBroadcastDynamicId_ ) + : pNext{ pNext_ } + , samplerMirrorClampToEdge{ samplerMirrorClampToEdge_ } + , drawIndirectCount{ drawIndirectCount_ } + , storageBuffer8BitAccess{ storageBuffer8BitAccess_ } + , uniformAndStorageBuffer8BitAccess{ uniformAndStorageBuffer8BitAccess_ } + , storagePushConstant8{ storagePushConstant8_ } + , shaderBufferInt64Atomics{ shaderBufferInt64Atomics_ } + , shaderSharedInt64Atomics{ shaderSharedInt64Atomics_ } + , shaderFloat16{ shaderFloat16_ } + , shaderInt8{ shaderInt8_ } + , descriptorIndexing{ descriptorIndexing_ } + , shaderInputAttachmentArrayDynamicIndexing{ shaderInputAttachmentArrayDynamicIndexing_ } + , shaderUniformTexelBufferArrayDynamicIndexing{ shaderUniformTexelBufferArrayDynamicIndexing_ } + , shaderStorageTexelBufferArrayDynamicIndexing{ shaderStorageTexelBufferArrayDynamicIndexing_ } + , shaderUniformBufferArrayNonUniformIndexing{ shaderUniformBufferArrayNonUniformIndexing_ } + , shaderSampledImageArrayNonUniformIndexing{ shaderSampledImageArrayNonUniformIndexing_ } + , shaderStorageBufferArrayNonUniformIndexing{ shaderStorageBufferArrayNonUniformIndexing_ } + , shaderStorageImageArrayNonUniformIndexing{ shaderStorageImageArrayNonUniformIndexing_ } + , shaderInputAttachmentArrayNonUniformIndexing{ shaderInputAttachmentArrayNonUniformIndexing_ } + , shaderUniformTexelBufferArrayNonUniformIndexing{ shaderUniformTexelBufferArrayNonUniformIndexing_ } + , shaderStorageTexelBufferArrayNonUniformIndexing{ shaderStorageTexelBufferArrayNonUniformIndexing_ } + , 
descriptorBindingUniformBufferUpdateAfterBind{ descriptorBindingUniformBufferUpdateAfterBind_ } + , descriptorBindingSampledImageUpdateAfterBind{ descriptorBindingSampledImageUpdateAfterBind_ } + , descriptorBindingStorageImageUpdateAfterBind{ descriptorBindingStorageImageUpdateAfterBind_ } + , descriptorBindingStorageBufferUpdateAfterBind{ descriptorBindingStorageBufferUpdateAfterBind_ } + , descriptorBindingUniformTexelBufferUpdateAfterBind{ descriptorBindingUniformTexelBufferUpdateAfterBind_ } + , descriptorBindingStorageTexelBufferUpdateAfterBind{ descriptorBindingStorageTexelBufferUpdateAfterBind_ } + , descriptorBindingUpdateUnusedWhilePending{ descriptorBindingUpdateUnusedWhilePending_ } + , descriptorBindingPartiallyBound{ descriptorBindingPartiallyBound_ } + , descriptorBindingVariableDescriptorCount{ descriptorBindingVariableDescriptorCount_ } + , runtimeDescriptorArray{ runtimeDescriptorArray_ } + , samplerFilterMinmax{ samplerFilterMinmax_ } + , scalarBlockLayout{ scalarBlockLayout_ } + , imagelessFramebuffer{ imagelessFramebuffer_ } + , uniformBufferStandardLayout{ uniformBufferStandardLayout_ } + , shaderSubgroupExtendedTypes{ shaderSubgroupExtendedTypes_ } + , separateDepthStencilLayouts{ separateDepthStencilLayouts_ } + , hostQueryReset{ hostQueryReset_ } + , timelineSemaphore{ timelineSemaphore_ } + , bufferDeviceAddress{ bufferDeviceAddress_ } + , bufferDeviceAddressCaptureReplay{ bufferDeviceAddressCaptureReplay_ } + , bufferDeviceAddressMultiDevice{ bufferDeviceAddressMultiDevice_ } + , vulkanMemoryModel{ vulkanMemoryModel_ } + , vulkanMemoryModelDeviceScope{ vulkanMemoryModelDeviceScope_ } + , vulkanMemoryModelAvailabilityVisibilityChains{ vulkanMemoryModelAvailabilityVisibilityChains_ } + , shaderOutputViewportIndex{ shaderOutputViewportIndex_ } + , shaderOutputLayer{ shaderOutputLayer_ } + , subgroupBroadcastDynamicId{ subgroupBroadcastDynamicId_ } { } @@ -89516,59 +91519,59 @@ namespace VULKAN_HPP_NAMESPACE uint64_t maxTimelineSemaphoreValueDifference_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferIntegerColorSampleCounts_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , driverID( driverID_ ) - , driverName( driverName_ ) - , driverInfo( driverInfo_ ) - , conformanceVersion( conformanceVersion_ ) - , denormBehaviorIndependence( denormBehaviorIndependence_ ) - , roundingModeIndependence( roundingModeIndependence_ ) - , shaderSignedZeroInfNanPreserveFloat16( shaderSignedZeroInfNanPreserveFloat16_ ) - , shaderSignedZeroInfNanPreserveFloat32( shaderSignedZeroInfNanPreserveFloat32_ ) - , shaderSignedZeroInfNanPreserveFloat64( shaderSignedZeroInfNanPreserveFloat64_ ) - , shaderDenormPreserveFloat16( shaderDenormPreserveFloat16_ ) - , shaderDenormPreserveFloat32( shaderDenormPreserveFloat32_ ) - , shaderDenormPreserveFloat64( shaderDenormPreserveFloat64_ ) - , shaderDenormFlushToZeroFloat16( shaderDenormFlushToZeroFloat16_ ) - , shaderDenormFlushToZeroFloat32( shaderDenormFlushToZeroFloat32_ ) - , shaderDenormFlushToZeroFloat64( shaderDenormFlushToZeroFloat64_ ) - , shaderRoundingModeRTEFloat16( shaderRoundingModeRTEFloat16_ ) - , shaderRoundingModeRTEFloat32( shaderRoundingModeRTEFloat32_ ) - , shaderRoundingModeRTEFloat64( shaderRoundingModeRTEFloat64_ ) - , shaderRoundingModeRTZFloat16( shaderRoundingModeRTZFloat16_ ) - , shaderRoundingModeRTZFloat32( shaderRoundingModeRTZFloat32_ ) - , shaderRoundingModeRTZFloat64( shaderRoundingModeRTZFloat64_ ) - , maxUpdateAfterBindDescriptorsInAllPools( maxUpdateAfterBindDescriptorsInAllPools_ ) 
- , shaderUniformBufferArrayNonUniformIndexingNative( shaderUniformBufferArrayNonUniformIndexingNative_ ) - , shaderSampledImageArrayNonUniformIndexingNative( shaderSampledImageArrayNonUniformIndexingNative_ ) - , shaderStorageBufferArrayNonUniformIndexingNative( shaderStorageBufferArrayNonUniformIndexingNative_ ) - , shaderStorageImageArrayNonUniformIndexingNative( shaderStorageImageArrayNonUniformIndexingNative_ ) - , shaderInputAttachmentArrayNonUniformIndexingNative( shaderInputAttachmentArrayNonUniformIndexingNative_ ) - , robustBufferAccessUpdateAfterBind( robustBufferAccessUpdateAfterBind_ ) - , quadDivergentImplicitLod( quadDivergentImplicitLod_ ) - , maxPerStageDescriptorUpdateAfterBindSamplers( maxPerStageDescriptorUpdateAfterBindSamplers_ ) - , maxPerStageDescriptorUpdateAfterBindUniformBuffers( maxPerStageDescriptorUpdateAfterBindUniformBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindStorageBuffers( maxPerStageDescriptorUpdateAfterBindStorageBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindSampledImages( maxPerStageDescriptorUpdateAfterBindSampledImages_ ) - , maxPerStageDescriptorUpdateAfterBindStorageImages( maxPerStageDescriptorUpdateAfterBindStorageImages_ ) - , maxPerStageDescriptorUpdateAfterBindInputAttachments( maxPerStageDescriptorUpdateAfterBindInputAttachments_ ) - , maxPerStageUpdateAfterBindResources( maxPerStageUpdateAfterBindResources_ ) - , maxDescriptorSetUpdateAfterBindSamplers( maxDescriptorSetUpdateAfterBindSamplers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffers( maxDescriptorSetUpdateAfterBindUniformBuffers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffersDynamic( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffers( maxDescriptorSetUpdateAfterBindStorageBuffers_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffersDynamic( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindSampledImages( maxDescriptorSetUpdateAfterBindSampledImages_ ) - , maxDescriptorSetUpdateAfterBindStorageImages( maxDescriptorSetUpdateAfterBindStorageImages_ ) - , maxDescriptorSetUpdateAfterBindInputAttachments( maxDescriptorSetUpdateAfterBindInputAttachments_ ) - , supportedDepthResolveModes( supportedDepthResolveModes_ ) - , supportedStencilResolveModes( supportedStencilResolveModes_ ) - , independentResolveNone( independentResolveNone_ ) - , independentResolve( independentResolve_ ) - , filterMinmaxSingleComponentFormats( filterMinmaxSingleComponentFormats_ ) - , filterMinmaxImageComponentMapping( filterMinmaxImageComponentMapping_ ) - , maxTimelineSemaphoreValueDifference( maxTimelineSemaphoreValueDifference_ ) - , framebufferIntegerColorSampleCounts( framebufferIntegerColorSampleCounts_ ) + : pNext{ pNext_ } + , driverID{ driverID_ } + , driverName{ driverName_ } + , driverInfo{ driverInfo_ } + , conformanceVersion{ conformanceVersion_ } + , denormBehaviorIndependence{ denormBehaviorIndependence_ } + , roundingModeIndependence{ roundingModeIndependence_ } + , shaderSignedZeroInfNanPreserveFloat16{ shaderSignedZeroInfNanPreserveFloat16_ } + , shaderSignedZeroInfNanPreserveFloat32{ shaderSignedZeroInfNanPreserveFloat32_ } + , shaderSignedZeroInfNanPreserveFloat64{ shaderSignedZeroInfNanPreserveFloat64_ } + , shaderDenormPreserveFloat16{ shaderDenormPreserveFloat16_ } + , shaderDenormPreserveFloat32{ shaderDenormPreserveFloat32_ } + , shaderDenormPreserveFloat64{ shaderDenormPreserveFloat64_ } + , shaderDenormFlushToZeroFloat16{ shaderDenormFlushToZeroFloat16_ } + , 
shaderDenormFlushToZeroFloat32{ shaderDenormFlushToZeroFloat32_ } + , shaderDenormFlushToZeroFloat64{ shaderDenormFlushToZeroFloat64_ } + , shaderRoundingModeRTEFloat16{ shaderRoundingModeRTEFloat16_ } + , shaderRoundingModeRTEFloat32{ shaderRoundingModeRTEFloat32_ } + , shaderRoundingModeRTEFloat64{ shaderRoundingModeRTEFloat64_ } + , shaderRoundingModeRTZFloat16{ shaderRoundingModeRTZFloat16_ } + , shaderRoundingModeRTZFloat32{ shaderRoundingModeRTZFloat32_ } + , shaderRoundingModeRTZFloat64{ shaderRoundingModeRTZFloat64_ } + , maxUpdateAfterBindDescriptorsInAllPools{ maxUpdateAfterBindDescriptorsInAllPools_ } + , shaderUniformBufferArrayNonUniformIndexingNative{ shaderUniformBufferArrayNonUniformIndexingNative_ } + , shaderSampledImageArrayNonUniformIndexingNative{ shaderSampledImageArrayNonUniformIndexingNative_ } + , shaderStorageBufferArrayNonUniformIndexingNative{ shaderStorageBufferArrayNonUniformIndexingNative_ } + , shaderStorageImageArrayNonUniformIndexingNative{ shaderStorageImageArrayNonUniformIndexingNative_ } + , shaderInputAttachmentArrayNonUniformIndexingNative{ shaderInputAttachmentArrayNonUniformIndexingNative_ } + , robustBufferAccessUpdateAfterBind{ robustBufferAccessUpdateAfterBind_ } + , quadDivergentImplicitLod{ quadDivergentImplicitLod_ } + , maxPerStageDescriptorUpdateAfterBindSamplers{ maxPerStageDescriptorUpdateAfterBindSamplers_ } + , maxPerStageDescriptorUpdateAfterBindUniformBuffers{ maxPerStageDescriptorUpdateAfterBindUniformBuffers_ } + , maxPerStageDescriptorUpdateAfterBindStorageBuffers{ maxPerStageDescriptorUpdateAfterBindStorageBuffers_ } + , maxPerStageDescriptorUpdateAfterBindSampledImages{ maxPerStageDescriptorUpdateAfterBindSampledImages_ } + , maxPerStageDescriptorUpdateAfterBindStorageImages{ maxPerStageDescriptorUpdateAfterBindStorageImages_ } + , maxPerStageDescriptorUpdateAfterBindInputAttachments{ maxPerStageDescriptorUpdateAfterBindInputAttachments_ } + , maxPerStageUpdateAfterBindResources{ maxPerStageUpdateAfterBindResources_ } + , maxDescriptorSetUpdateAfterBindSamplers{ maxDescriptorSetUpdateAfterBindSamplers_ } + , maxDescriptorSetUpdateAfterBindUniformBuffers{ maxDescriptorSetUpdateAfterBindUniformBuffers_ } + , maxDescriptorSetUpdateAfterBindUniformBuffersDynamic{ maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ } + , maxDescriptorSetUpdateAfterBindStorageBuffers{ maxDescriptorSetUpdateAfterBindStorageBuffers_ } + , maxDescriptorSetUpdateAfterBindStorageBuffersDynamic{ maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ } + , maxDescriptorSetUpdateAfterBindSampledImages{ maxDescriptorSetUpdateAfterBindSampledImages_ } + , maxDescriptorSetUpdateAfterBindStorageImages{ maxDescriptorSetUpdateAfterBindStorageImages_ } + , maxDescriptorSetUpdateAfterBindInputAttachments{ maxDescriptorSetUpdateAfterBindInputAttachments_ } + , supportedDepthResolveModes{ supportedDepthResolveModes_ } + , supportedStencilResolveModes{ supportedStencilResolveModes_ } + , independentResolveNone{ independentResolveNone_ } + , independentResolve{ independentResolve_ } + , filterMinmaxSingleComponentFormats{ filterMinmaxSingleComponentFormats_ } + , filterMinmaxImageComponentMapping{ filterMinmaxImageComponentMapping_ } + , maxTimelineSemaphoreValueDifference{ maxTimelineSemaphoreValueDifference_ } + , framebufferIntegerColorSampleCounts{ framebufferIntegerColorSampleCounts_ } { } @@ -89579,129 +91582,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceVulkan12Properties( - VULKAN_HPP_NAMESPACE::DriverId 
driverID_, - std::string const & driverName_, - std::string const & driverInfo_ = {}, - VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion_ = {}, - VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence denormBehaviorIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, - VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence roundingModeIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, - VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat16_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat32_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat64_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat16_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat32_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat64_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat16_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat32_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat64_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat16_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat32_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat64_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat16_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat64_ = {}, - uint32_t maxUpdateAfterBindDescriptorsInAllPools_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexingNative_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexingNative_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexingNative_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexingNative_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexingNative_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccessUpdateAfterBind_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 quadDivergentImplicitLod_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindSamplers_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages_ = {}, - uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments_ = {}, - uint32_t maxPerStageUpdateAfterBindResources_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindSamplers_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindSampledImages_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindStorageImages_ = {}, - uint32_t maxDescriptorSetUpdateAfterBindInputAttachments_ = {}, - VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedDepthResolveModes_ = {}, - VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedStencilResolveModes_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 independentResolve_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxImageComponentMapping_ = {}, - uint64_t 
maxTimelineSemaphoreValueDifference_ = {}, - VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferIntegerColorSampleCounts_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ) - , driverID( driverID_ ) - , conformanceVersion( conformanceVersion_ ) - , denormBehaviorIndependence( denormBehaviorIndependence_ ) - , roundingModeIndependence( roundingModeIndependence_ ) - , shaderSignedZeroInfNanPreserveFloat16( shaderSignedZeroInfNanPreserveFloat16_ ) - , shaderSignedZeroInfNanPreserveFloat32( shaderSignedZeroInfNanPreserveFloat32_ ) - , shaderSignedZeroInfNanPreserveFloat64( shaderSignedZeroInfNanPreserveFloat64_ ) - , shaderDenormPreserveFloat16( shaderDenormPreserveFloat16_ ) - , shaderDenormPreserveFloat32( shaderDenormPreserveFloat32_ ) - , shaderDenormPreserveFloat64( shaderDenormPreserveFloat64_ ) - , shaderDenormFlushToZeroFloat16( shaderDenormFlushToZeroFloat16_ ) - , shaderDenormFlushToZeroFloat32( shaderDenormFlushToZeroFloat32_ ) - , shaderDenormFlushToZeroFloat64( shaderDenormFlushToZeroFloat64_ ) - , shaderRoundingModeRTEFloat16( shaderRoundingModeRTEFloat16_ ) - , shaderRoundingModeRTEFloat32( shaderRoundingModeRTEFloat32_ ) - , shaderRoundingModeRTEFloat64( shaderRoundingModeRTEFloat64_ ) - , shaderRoundingModeRTZFloat16( shaderRoundingModeRTZFloat16_ ) - , shaderRoundingModeRTZFloat32( shaderRoundingModeRTZFloat32_ ) - , shaderRoundingModeRTZFloat64( shaderRoundingModeRTZFloat64_ ) - , maxUpdateAfterBindDescriptorsInAllPools( maxUpdateAfterBindDescriptorsInAllPools_ ) - , shaderUniformBufferArrayNonUniformIndexingNative( shaderUniformBufferArrayNonUniformIndexingNative_ ) - , shaderSampledImageArrayNonUniformIndexingNative( shaderSampledImageArrayNonUniformIndexingNative_ ) - , shaderStorageBufferArrayNonUniformIndexingNative( shaderStorageBufferArrayNonUniformIndexingNative_ ) - , shaderStorageImageArrayNonUniformIndexingNative( shaderStorageImageArrayNonUniformIndexingNative_ ) - , shaderInputAttachmentArrayNonUniformIndexingNative( shaderInputAttachmentArrayNonUniformIndexingNative_ ) - , robustBufferAccessUpdateAfterBind( robustBufferAccessUpdateAfterBind_ ) - , quadDivergentImplicitLod( quadDivergentImplicitLod_ ) - , maxPerStageDescriptorUpdateAfterBindSamplers( maxPerStageDescriptorUpdateAfterBindSamplers_ ) - , maxPerStageDescriptorUpdateAfterBindUniformBuffers( maxPerStageDescriptorUpdateAfterBindUniformBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindStorageBuffers( maxPerStageDescriptorUpdateAfterBindStorageBuffers_ ) - , maxPerStageDescriptorUpdateAfterBindSampledImages( maxPerStageDescriptorUpdateAfterBindSampledImages_ ) - , maxPerStageDescriptorUpdateAfterBindStorageImages( maxPerStageDescriptorUpdateAfterBindStorageImages_ ) - , maxPerStageDescriptorUpdateAfterBindInputAttachments( maxPerStageDescriptorUpdateAfterBindInputAttachments_ ) - , maxPerStageUpdateAfterBindResources( maxPerStageUpdateAfterBindResources_ ) - , maxDescriptorSetUpdateAfterBindSamplers( maxDescriptorSetUpdateAfterBindSamplers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffers( maxDescriptorSetUpdateAfterBindUniformBuffers_ ) - , maxDescriptorSetUpdateAfterBindUniformBuffersDynamic( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffers( maxDescriptorSetUpdateAfterBindStorageBuffers_ ) - , maxDescriptorSetUpdateAfterBindStorageBuffersDynamic( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ ) - , maxDescriptorSetUpdateAfterBindSampledImages( maxDescriptorSetUpdateAfterBindSampledImages_ ) - , 
maxDescriptorSetUpdateAfterBindStorageImages( maxDescriptorSetUpdateAfterBindStorageImages_ ) - , maxDescriptorSetUpdateAfterBindInputAttachments( maxDescriptorSetUpdateAfterBindInputAttachments_ ) - , supportedDepthResolveModes( supportedDepthResolveModes_ ) - , supportedStencilResolveModes( supportedStencilResolveModes_ ) - , independentResolveNone( independentResolveNone_ ) - , independentResolve( independentResolve_ ) - , filterMinmaxSingleComponentFormats( filterMinmaxSingleComponentFormats_ ) - , filterMinmaxImageComponentMapping( filterMinmaxImageComponentMapping_ ) - , maxTimelineSemaphoreValueDifference( maxTimelineSemaphoreValueDifference_ ) - , framebufferIntegerColorSampleCounts( framebufferIntegerColorSampleCounts_ ) - { - VULKAN_HPP_ASSERT( driverName_.size() < VK_MAX_DRIVER_NAME_SIZE ); -# if defined( WIN32 ) - strncpy_s( driverName, VK_MAX_DRIVER_NAME_SIZE, driverName_.data(), driverName_.size() ); -# else - strncpy( driverName, driverName_.data(), std::min( VK_MAX_DRIVER_NAME_SIZE, driverName_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( driverInfo_.size() < VK_MAX_DRIVER_INFO_SIZE ); -# if defined( WIN32 ) - strncpy_s( driverInfo, VK_MAX_DRIVER_INFO_SIZE, driverInfo_.data(), driverInfo_.size() ); -# else - strncpy( driverInfo, driverInfo_.data(), std::min( VK_MAX_DRIVER_INFO_SIZE, driverInfo_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PhysicalDeviceVulkan12Properties & operator=( PhysicalDeviceVulkan12Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -90092,22 +91972,22 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerDotProduct_ = {}, VULKAN_HPP_NAMESPACE::Bool32 maintenance4_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , robustImageAccess( robustImageAccess_ ) - , inlineUniformBlock( inlineUniformBlock_ ) - , descriptorBindingInlineUniformBlockUpdateAfterBind( descriptorBindingInlineUniformBlockUpdateAfterBind_ ) - , pipelineCreationCacheControl( pipelineCreationCacheControl_ ) - , privateData( privateData_ ) - , shaderDemoteToHelperInvocation( shaderDemoteToHelperInvocation_ ) - , shaderTerminateInvocation( shaderTerminateInvocation_ ) - , subgroupSizeControl( subgroupSizeControl_ ) - , computeFullSubgroups( computeFullSubgroups_ ) - , synchronization2( synchronization2_ ) - , textureCompressionASTC_HDR( textureCompressionASTC_HDR_ ) - , shaderZeroInitializeWorkgroupMemory( shaderZeroInitializeWorkgroupMemory_ ) - , dynamicRendering( dynamicRendering_ ) - , shaderIntegerDotProduct( shaderIntegerDotProduct_ ) - , maintenance4( maintenance4_ ) + : pNext{ pNext_ } + , robustImageAccess{ robustImageAccess_ } + , inlineUniformBlock{ inlineUniformBlock_ } + , descriptorBindingInlineUniformBlockUpdateAfterBind{ descriptorBindingInlineUniformBlockUpdateAfterBind_ } + , pipelineCreationCacheControl{ pipelineCreationCacheControl_ } + , privateData{ privateData_ } + , shaderDemoteToHelperInvocation{ shaderDemoteToHelperInvocation_ } + , shaderTerminateInvocation{ shaderTerminateInvocation_ } + , subgroupSizeControl{ subgroupSizeControl_ } + , computeFullSubgroups{ computeFullSubgroups_ } + , synchronization2{ synchronization2_ } + , textureCompressionASTC_HDR{ textureCompressionASTC_HDR_ } + , shaderZeroInitializeWorkgroupMemory{ shaderZeroInitializeWorkgroupMemory_ } + , dynamicRendering{ dynamicRendering_ } + , shaderIntegerDotProduct{ shaderIntegerDotProduct_ } + , maintenance4{ maintenance4_ } { } @@ -90393,53 +92273,52 @@ namespace 
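// --- sketch (not from upstream) --------------------------------------------
// The block removed just above is the std::string convenience constructor that
// strncpy'd driverName/driverInfo into their fixed-size char arrays. The struct
// is returned-only, so dropping the ctor upstream is harmless; code that still
// wants to fill the arrays by hand (tests, mocks) can copy directly. A minimal
// sketch under that assumption; SetDriverName is a hypothetical helper:
#include <algorithm>
#include <string>
#include <vulkan/vulkan.hpp>

void SetDriverName( vk::PhysicalDeviceVulkan12Properties & props, std::string const & name )
{
    size_t const n = std::min<size_t>( name.size(), VK_MAX_DRIVER_NAME_SIZE - 1 );
    std::copy_n( name.data(), n, props.driverName.begin() );   // driverName is an ArrayWrapper1D<char, N>, i.e. a std::array
    props.driverName[n] = '\0';                                // keep it NUL-terminated like the old ctor did
}
// ----------------------------------------------------------------------------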
VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 uniformTexelBufferOffsetSingleTexelAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxBufferSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minSubgroupSize( minSubgroupSize_ ) - , maxSubgroupSize( maxSubgroupSize_ ) - , maxComputeWorkgroupSubgroups( maxComputeWorkgroupSubgroups_ ) - , requiredSubgroupSizeStages( requiredSubgroupSizeStages_ ) - , maxInlineUniformBlockSize( maxInlineUniformBlockSize_ ) - , maxPerStageDescriptorInlineUniformBlocks( maxPerStageDescriptorInlineUniformBlocks_ ) - , maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks( maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ ) - , maxDescriptorSetInlineUniformBlocks( maxDescriptorSetInlineUniformBlocks_ ) - , maxDescriptorSetUpdateAfterBindInlineUniformBlocks( maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ ) - , maxInlineUniformTotalSize( maxInlineUniformTotalSize_ ) - , integerDotProduct8BitUnsignedAccelerated( integerDotProduct8BitUnsignedAccelerated_ ) - , integerDotProduct8BitSignedAccelerated( integerDotProduct8BitSignedAccelerated_ ) - , integerDotProduct8BitMixedSignednessAccelerated( integerDotProduct8BitMixedSignednessAccelerated_ ) - , integerDotProduct4x8BitPackedUnsignedAccelerated( integerDotProduct4x8BitPackedUnsignedAccelerated_ ) - , integerDotProduct4x8BitPackedSignedAccelerated( integerDotProduct4x8BitPackedSignedAccelerated_ ) - , integerDotProduct4x8BitPackedMixedSignednessAccelerated( integerDotProduct4x8BitPackedMixedSignednessAccelerated_ ) - , integerDotProduct16BitUnsignedAccelerated( integerDotProduct16BitUnsignedAccelerated_ ) - , integerDotProduct16BitSignedAccelerated( integerDotProduct16BitSignedAccelerated_ ) - , integerDotProduct16BitMixedSignednessAccelerated( integerDotProduct16BitMixedSignednessAccelerated_ ) - , integerDotProduct32BitUnsignedAccelerated( integerDotProduct32BitUnsignedAccelerated_ ) - , integerDotProduct32BitSignedAccelerated( integerDotProduct32BitSignedAccelerated_ ) - , integerDotProduct32BitMixedSignednessAccelerated( integerDotProduct32BitMixedSignednessAccelerated_ ) - , integerDotProduct64BitUnsignedAccelerated( integerDotProduct64BitUnsignedAccelerated_ ) - , integerDotProduct64BitSignedAccelerated( integerDotProduct64BitSignedAccelerated_ ) - , integerDotProduct64BitMixedSignednessAccelerated( integerDotProduct64BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitUnsignedAccelerated( integerDotProductAccumulatingSaturating8BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitSignedAccelerated( integerDotProductAccumulatingSaturating8BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated( integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated( integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated( - integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitUnsignedAccelerated( integerDotProductAccumulatingSaturating16BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitSignedAccelerated( 
integerDotProductAccumulatingSaturating16BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitUnsignedAccelerated( integerDotProductAccumulatingSaturating32BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitSignedAccelerated( integerDotProductAccumulatingSaturating32BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitUnsignedAccelerated( integerDotProductAccumulatingSaturating64BitUnsignedAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitSignedAccelerated( integerDotProductAccumulatingSaturating64BitSignedAccelerated_ ) - , integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated( integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated_ ) - , storageTexelBufferOffsetAlignmentBytes( storageTexelBufferOffsetAlignmentBytes_ ) - , storageTexelBufferOffsetSingleTexelAlignment( storageTexelBufferOffsetSingleTexelAlignment_ ) - , uniformTexelBufferOffsetAlignmentBytes( uniformTexelBufferOffsetAlignmentBytes_ ) - , uniformTexelBufferOffsetSingleTexelAlignment( uniformTexelBufferOffsetSingleTexelAlignment_ ) - , maxBufferSize( maxBufferSize_ ) + : pNext{ pNext_ } + , minSubgroupSize{ minSubgroupSize_ } + , maxSubgroupSize{ maxSubgroupSize_ } + , maxComputeWorkgroupSubgroups{ maxComputeWorkgroupSubgroups_ } + , requiredSubgroupSizeStages{ requiredSubgroupSizeStages_ } + , maxInlineUniformBlockSize{ maxInlineUniformBlockSize_ } + , maxPerStageDescriptorInlineUniformBlocks{ maxPerStageDescriptorInlineUniformBlocks_ } + , maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks{ maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ } + , maxDescriptorSetInlineUniformBlocks{ maxDescriptorSetInlineUniformBlocks_ } + , maxDescriptorSetUpdateAfterBindInlineUniformBlocks{ maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ } + , maxInlineUniformTotalSize{ maxInlineUniformTotalSize_ } + , integerDotProduct8BitUnsignedAccelerated{ integerDotProduct8BitUnsignedAccelerated_ } + , integerDotProduct8BitSignedAccelerated{ integerDotProduct8BitSignedAccelerated_ } + , integerDotProduct8BitMixedSignednessAccelerated{ integerDotProduct8BitMixedSignednessAccelerated_ } + , integerDotProduct4x8BitPackedUnsignedAccelerated{ integerDotProduct4x8BitPackedUnsignedAccelerated_ } + , integerDotProduct4x8BitPackedSignedAccelerated{ integerDotProduct4x8BitPackedSignedAccelerated_ } + , integerDotProduct4x8BitPackedMixedSignednessAccelerated{ integerDotProduct4x8BitPackedMixedSignednessAccelerated_ } + , integerDotProduct16BitUnsignedAccelerated{ integerDotProduct16BitUnsignedAccelerated_ } + , integerDotProduct16BitSignedAccelerated{ integerDotProduct16BitSignedAccelerated_ } + , integerDotProduct16BitMixedSignednessAccelerated{ integerDotProduct16BitMixedSignednessAccelerated_ } + , integerDotProduct32BitUnsignedAccelerated{ integerDotProduct32BitUnsignedAccelerated_ } + , integerDotProduct32BitSignedAccelerated{ integerDotProduct32BitSignedAccelerated_ } + , integerDotProduct32BitMixedSignednessAccelerated{ integerDotProduct32BitMixedSignednessAccelerated_ } + , integerDotProduct64BitUnsignedAccelerated{ integerDotProduct64BitUnsignedAccelerated_ } + , integerDotProduct64BitSignedAccelerated{ 
integerDotProduct64BitSignedAccelerated_ } + , integerDotProduct64BitMixedSignednessAccelerated{ integerDotProduct64BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating8BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating8BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating8BitSignedAccelerated{ integerDotProductAccumulatingSaturating8BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated_ } + , integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating16BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating16BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating16BitSignedAccelerated{ integerDotProductAccumulatingSaturating16BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating32BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating32BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating32BitSignedAccelerated{ integerDotProductAccumulatingSaturating32BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated_ } + , integerDotProductAccumulatingSaturating64BitUnsignedAccelerated{ integerDotProductAccumulatingSaturating64BitUnsignedAccelerated_ } + , integerDotProductAccumulatingSaturating64BitSignedAccelerated{ integerDotProductAccumulatingSaturating64BitSignedAccelerated_ } + , integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated{ integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated_ } + , storageTexelBufferOffsetAlignmentBytes{ storageTexelBufferOffsetAlignmentBytes_ } + , storageTexelBufferOffsetSingleTexelAlignment{ storageTexelBufferOffsetSingleTexelAlignment_ } + , uniformTexelBufferOffsetAlignmentBytes{ uniformTexelBufferOffsetAlignmentBytes_ } + , uniformTexelBufferOffsetSingleTexelAlignment{ uniformTexelBufferOffsetSingleTexelAlignment_ } + , maxBufferSize{ maxBufferSize_ } { } @@ -90707,10 +92586,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , vulkanMemoryModel( vulkanMemoryModel_ ) - , vulkanMemoryModelDeviceScope( vulkanMemoryModelDeviceScope_ ) - , vulkanMemoryModelAvailabilityVisibilityChains( vulkanMemoryModelAvailabilityVisibilityChains_ ) + : pNext{ pNext_ } + , vulkanMemoryModel{ vulkanMemoryModel_ } + , vulkanMemoryModelDeviceScope{ vulkanMemoryModelDeviceScope_ } + , vulkanMemoryModelAvailabilityVisibilityChains{ vulkanMemoryModelAvailabilityVisibilityChains_ } { } @@ -90835,11 +92714,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 workgroupMemoryExplicitLayout8BitAccess_ = {}, 
VULKAN_HPP_NAMESPACE::Bool32 workgroupMemoryExplicitLayout16BitAccess_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , workgroupMemoryExplicitLayout( workgroupMemoryExplicitLayout_ ) - , workgroupMemoryExplicitLayoutScalarBlockLayout( workgroupMemoryExplicitLayoutScalarBlockLayout_ ) - , workgroupMemoryExplicitLayout8BitAccess( workgroupMemoryExplicitLayout8BitAccess_ ) - , workgroupMemoryExplicitLayout16BitAccess( workgroupMemoryExplicitLayout16BitAccess_ ) + : pNext{ pNext_ } + , workgroupMemoryExplicitLayout{ workgroupMemoryExplicitLayout_ } + , workgroupMemoryExplicitLayoutScalarBlockLayout{ workgroupMemoryExplicitLayoutScalarBlockLayout_ } + , workgroupMemoryExplicitLayout8BitAccess{ workgroupMemoryExplicitLayout8BitAccess_ } + , workgroupMemoryExplicitLayout16BitAccess{ workgroupMemoryExplicitLayout16BitAccess_ } { } @@ -90976,8 +92855,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 ycbcr2plane444Formats_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , ycbcr2plane444Formats( ycbcr2plane444Formats_ ) + : pNext{ pNext_ } + , ycbcr2plane444Formats{ ycbcr2plane444Formats_ } { } @@ -91074,8 +92953,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceYcbcrDegammaFeaturesQCOM( VULKAN_HPP_NAMESPACE::Bool32 ycbcrDegamma_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , ycbcrDegamma( ycbcrDegamma_ ) + : pNext{ pNext_ } + , ycbcrDegamma{ ycbcrDegamma_ } { } @@ -91171,8 +93050,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceYcbcrImageArraysFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 ycbcrImageArrays_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , ycbcrImageArrays( ycbcrImageArrays_ ) + : pNext{ pNext_ } + , ycbcrImageArrays{ ycbcrImageArrays_ } { } @@ -91269,8 +93148,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderZeroInitializeWorkgroupMemory_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shaderZeroInitializeWorkgroupMemory( shaderZeroInitializeWorkgroupMemory_ ) + : pNext{ pNext_ } + , shaderZeroInitializeWorkgroupMemory{ shaderZeroInitializeWorkgroupMemory_ } { } @@ -91361,6 +93240,910 @@ namespace VULKAN_HPP_NAMESPACE using PhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR = PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures; + struct PipelineBinaryKeyKHR + { + using NativeType = VkPipelineBinaryKeyKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineBinaryKeyKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeyKHR( uint32_t keySize_ = {}, + std::array const & key_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , keySize{ keySize_ } + , key{ key_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeyKHR( PipelineBinaryKeyKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryKeyKHR( VkPipelineBinaryKeyKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryKeyKHR( *reinterpret_cast( &rhs ) ) + { + } + + PipelineBinaryKeyKHR & operator=( 
PipelineBinaryKeyKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryKeyKHR & operator=( VkPipelineBinaryKeyKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeyKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeyKHR & setKeySize( uint32_t keySize_ ) VULKAN_HPP_NOEXCEPT + { + keySize = keySize_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeyKHR & setKey( std::array key_ ) VULKAN_HPP_NOEXCEPT + { + key = key_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryKeyKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryKeyKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple const &> +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, keySize, key ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryKeyKHR const & ) const = default; +#else + bool operator==( PipelineBinaryKeyKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( keySize == rhs.keySize ) && ( key == rhs.key ); +# endif + } + + bool operator!=( PipelineBinaryKeyKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineBinaryKeyKHR; + void * pNext = {}; + uint32_t keySize = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D key = {}; + }; + + template <> + struct CppType + { + using Type = PipelineBinaryKeyKHR; + }; + + struct PipelineBinaryDataKHR + { + using NativeType = VkPipelineBinaryDataKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineBinaryDataKHR( size_t dataSize_ = {}, void * pData_ = {} ) VULKAN_HPP_NOEXCEPT + : dataSize{ dataSize_ } + , pData{ pData_ } + { + } + + VULKAN_HPP_CONSTEXPR PipelineBinaryDataKHR( PipelineBinaryDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryDataKHR( VkPipelineBinaryDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryDataKHR( *reinterpret_cast( &rhs ) ) + { + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + template + PipelineBinaryDataKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & data_ ) : dataSize( data_.size() * sizeof( T ) ), pData( data_.data() ) + { + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + PipelineBinaryDataKHR & operator=( PipelineBinaryDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryDataKHR & operator=( VkPipelineBinaryDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryDataKHR & setDataSize( size_t dataSize_ ) VULKAN_HPP_NOEXCEPT + { + dataSize = dataSize_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryDataKHR & setPData( void * pData_ ) VULKAN_HPP_NOEXCEPT + { + pData = pData_; + return *this; + } + +# if !defined( 
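// --- sketch (not from upstream) --------------------------------------------
// PipelineBinaryKeyKHR / PipelineBinaryDataKHR are the VK_KHR_pipeline_binary
// plumbing this vulkan.hpp update pulls in: a key identifies a binary, the
// data struct carries its blob. A minimal sketch of how the pair is retrieved,
// assuming a device with the extension enabled; `device` and `binary` are
// hypothetical handles, and the two-call sizing below is the usual Vulkan
// idiom rather than anything specific to this patch:
#include <vector>
#include <vulkan/vulkan.hpp>

std::vector<uint8_t> DumpBinary( vk::Device device, vk::PipelineBinaryKHR binary )
{
    vk::PipelineBinaryDataInfoKHR dataInfo{};   // struct defined further down in this hunk
    dataInfo.setPipelineBinary( binary );
    vk::PipelineBinaryKeyKHR key{};             // receives the cache-lookup key
    size_t size = 0;
    (void)device.getPipelineBinaryDataKHR( &dataInfo, &key, &size, nullptr );      // size query
    std::vector<uint8_t> blob( size );
    (void)device.getPipelineBinaryDataKHR( &dataInfo, &key, &size, blob.data() );  // fill
    return blob;
}
// ----------------------------------------------------------------------------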
VULKAN_HPP_DISABLE_ENHANCED_MODE ) + template + PipelineBinaryDataKHR & setData( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & data_ ) VULKAN_HPP_NOEXCEPT + { + dataSize = data_.size() * sizeof( T ); + pData = data_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryDataKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryDataKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( dataSize, pData ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryDataKHR const & ) const = default; +#else + bool operator==( PipelineBinaryDataKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( dataSize == rhs.dataSize ) && ( pData == rhs.pData ); +# endif + } + + bool operator!=( PipelineBinaryDataKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + size_t dataSize = {}; + void * pData = {}; + }; + + struct PipelineBinaryKeysAndDataKHR + { + using NativeType = VkPipelineBinaryKeysAndDataKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeysAndDataKHR( uint32_t binaryCount_ = {}, + const VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineBinaryKeys_ = {}, + const VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR * pPipelineBinaryData_ = {} ) VULKAN_HPP_NOEXCEPT + : binaryCount{ binaryCount_ } + , pPipelineBinaryKeys{ pPipelineBinaryKeys_ } + , pPipelineBinaryData{ pPipelineBinaryData_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeysAndDataKHR( PipelineBinaryKeysAndDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryKeysAndDataKHR( VkPipelineBinaryKeysAndDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryKeysAndDataKHR( *reinterpret_cast( &rhs ) ) + { + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryKeysAndDataKHR( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaryKeys_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaryData_ = {} ) + : binaryCount( static_cast( pipelineBinaryKeys_.size() ) ) + , pPipelineBinaryKeys( pipelineBinaryKeys_.data() ) + , pPipelineBinaryData( pipelineBinaryData_.data() ) + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( pipelineBinaryKeys_.size() == pipelineBinaryData_.size() ); +# else + if ( pipelineBinaryKeys_.size() != pipelineBinaryData_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING + "::PipelineBinaryKeysAndDataKHR::PipelineBinaryKeysAndDataKHR: pipelineBinaryKeys_.size() != pipelineBinaryData_.size()" ); + } +# endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + PipelineBinaryKeysAndDataKHR & operator=( PipelineBinaryKeysAndDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryKeysAndDataKHR & operator=( VkPipelineBinaryKeysAndDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeysAndDataKHR & setBinaryCount( uint32_t binaryCount_ ) 
VULKAN_HPP_NOEXCEPT + { + binaryCount = binaryCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeysAndDataKHR & + setPPipelineBinaryKeys( const VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineBinaryKeys_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineBinaryKeys = pPipelineBinaryKeys_; + return *this; + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryKeysAndDataKHR & setPipelineBinaryKeys( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaryKeys_ ) VULKAN_HPP_NOEXCEPT + { + binaryCount = static_cast( pipelineBinaryKeys_.size() ); + pPipelineBinaryKeys = pipelineBinaryKeys_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryKeysAndDataKHR & + setPPipelineBinaryData( const VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR * pPipelineBinaryData_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineBinaryData = pPipelineBinaryData_; + return *this; + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryKeysAndDataKHR & setPipelineBinaryData( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaryData_ ) VULKAN_HPP_NOEXCEPT + { + binaryCount = static_cast( pipelineBinaryData_.size() ); + pPipelineBinaryData = pipelineBinaryData_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryKeysAndDataKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryKeysAndDataKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( binaryCount, pPipelineBinaryKeys, pPipelineBinaryData ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryKeysAndDataKHR const & ) const = default; +#else + bool operator==( PipelineBinaryKeysAndDataKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( binaryCount == rhs.binaryCount ) && ( pPipelineBinaryKeys == rhs.pPipelineBinaryKeys ) && ( pPipelineBinaryData == rhs.pPipelineBinaryData ); +# endif + } + + bool operator!=( PipelineBinaryKeysAndDataKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + uint32_t binaryCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineBinaryKeyKHR * pPipelineBinaryKeys = {}; + const VULKAN_HPP_NAMESPACE::PipelineBinaryDataKHR * pPipelineBinaryData = {}; + }; + + struct PipelineCreateInfoKHR + { + using NativeType = VkPipelineCreateInfoKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCreateInfoKHR( void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } {} + + VULKAN_HPP_CONSTEXPR PipelineCreateInfoKHR( PipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCreateInfoKHR( VkPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineCreateInfoKHR( *reinterpret_cast( &rhs ) ) + { + } + + PipelineCreateInfoKHR & operator=( PipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineCreateInfoKHR & operator=( 
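// --- sketch (not from upstream) --------------------------------------------
// PipelineBinaryKeysAndDataKHR carries parallel key/data arrays under a single
// binaryCount; note the enhanced-mode constructor above throws (or asserts
// under VULKAN_HPP_NO_EXCEPTIONS) when the two sizes differ, so the arrays
// must be kept in lockstep. A small sketch using only the setters defined in
// this hunk; `keys`/`datas` are assumed to be built elsewhere:
#include <array>
#include <vulkan/vulkan.hpp>

vk::PipelineBinaryKeysAndDataKHR PackKeysAndData(
    std::array<vk::PipelineBinaryKeyKHR, 2> const &  keys,
    std::array<vk::PipelineBinaryDataKHR, 2> const & datas )
{
    vk::PipelineBinaryKeysAndDataKHR kd{};
    kd.setBinaryCount( static_cast<uint32_t>( keys.size() ) )   // one count covers both arrays
      .setPPipelineBinaryKeys( keys.data() )
      .setPPipelineBinaryData( datas.data() );
    return kd;                                                  // caller must keep the arrays alive
}
// ----------------------------------------------------------------------------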
VkPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineCreateInfoKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineCreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineCreateInfoKHR const & ) const = default; +#else + bool operator==( PipelineCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ); +# endif + } + + bool operator!=( PipelineCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCreateInfoKHR; + void * pNext = {}; + }; + + template <> + struct CppType + { + using Type = PipelineCreateInfoKHR; + }; + + struct PipelineBinaryCreateInfoKHR + { + using NativeType = VkPipelineBinaryCreateInfoKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineBinaryCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryCreateInfoKHR( const VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR * pKeysAndDataInfo_ = {}, + VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, + const VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR * pPipelineCreateInfo_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , pKeysAndDataInfo{ pKeysAndDataInfo_ } + , pipeline{ pipeline_ } + , pPipelineCreateInfo{ pPipelineCreateInfo_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryCreateInfoKHR( PipelineBinaryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryCreateInfoKHR( VkPipelineBinaryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryCreateInfoKHR( *reinterpret_cast( &rhs ) ) + { + } + + PipelineBinaryCreateInfoKHR & operator=( PipelineBinaryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryCreateInfoKHR & operator=( VkPipelineBinaryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryCreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryCreateInfoKHR & + setPKeysAndDataInfo( const VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR * pKeysAndDataInfo_ ) VULKAN_HPP_NOEXCEPT + { + pKeysAndDataInfo = pKeysAndDataInfo_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryCreateInfoKHR & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT + { + pipeline = pipeline_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 
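// --- sketch (not from upstream) --------------------------------------------
// PipelineCreateInfoKHR is deliberately just an sType/pNext shell: the actual
// pipeline state is chained behind it, and vkGetPipelineKeyKHR hashes that
// chain into a PipelineBinaryKeyKHR. If I read the extension correctly, a null
// create info yields the device's global key; sketch under that assumption:
#include <vulkan/vulkan.hpp>

vk::PipelineBinaryKeyKHR GlobalPipelineKey( vk::Device device )
{
    vk::PipelineBinaryKeyKHR key{};
    (void)device.getPipelineKeyKHR( nullptr, &key );   // nullptr => device-global key
    return key;
}
// ----------------------------------------------------------------------------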
PipelineBinaryCreateInfoKHR & + setPPipelineCreateInfo( const VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR * pPipelineCreateInfo_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineCreateInfo = pPipelineCreateInfo_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryCreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, pKeysAndDataInfo, pipeline, pPipelineCreateInfo ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryCreateInfoKHR const & ) const = default; +#else + bool operator==( PipelineBinaryCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pKeysAndDataInfo == rhs.pKeysAndDataInfo ) && ( pipeline == rhs.pipeline ) && + ( pPipelineCreateInfo == rhs.pPipelineCreateInfo ); +# endif + } + + bool operator!=( PipelineBinaryCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineBinaryCreateInfoKHR; + const void * pNext = {}; + const VULKAN_HPP_NAMESPACE::PipelineBinaryKeysAndDataKHR * pKeysAndDataInfo = {}; + VULKAN_HPP_NAMESPACE::Pipeline pipeline = {}; + const VULKAN_HPP_NAMESPACE::PipelineCreateInfoKHR * pPipelineCreateInfo = {}; + }; + + template <> + struct CppType + { + using Type = PipelineBinaryCreateInfoKHR; + }; + + struct PipelineBinaryDataInfoKHR + { + using NativeType = VkPipelineBinaryDataInfoKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineBinaryDataInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineBinaryDataInfoKHR( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , pipelineBinary{ pipelineBinary_ } + { + } + + VULKAN_HPP_CONSTEXPR PipelineBinaryDataInfoKHR( PipelineBinaryDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryDataInfoKHR( VkPipelineBinaryDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryDataInfoKHR( *reinterpret_cast( &rhs ) ) + { + } + + PipelineBinaryDataInfoKHR & operator=( PipelineBinaryDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryDataInfoKHR & operator=( VkPipelineBinaryDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryDataInfoKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryDataInfoKHR & setPipelineBinary( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBinary = pipelineBinary_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryDataInfoKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
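// --- sketch (not from upstream) --------------------------------------------
// PipelineBinaryCreateInfoKHR acts as a tagged union: exactly one of
// pKeysAndDataInfo, pipeline, or pPipelineCreateInfo is meant to be non-null.
// Capturing binaries from a live pipeline (created with the capture-data
// creation flag) then follows the familiar count-then-fetch pattern via
// PipelineBinaryHandlesInfoKHR; `pipeline` is a hypothetical handle:
#include <vector>
#include <vulkan/vulkan.hpp>

std::vector<vk::PipelineBinaryKHR> CaptureBinaries( vk::Device device, vk::Pipeline pipeline )
{
    vk::PipelineBinaryCreateInfoKHR ci{};
    ci.setPipeline( pipeline );                           // the "from a pipeline" arm of the union
    vk::PipelineBinaryHandlesInfoKHR handles{};           // null pPipelineBinaries => count query...
    (void)device.createPipelineBinariesKHR( &ci, nullptr, &handles );
    std::vector<vk::PipelineBinaryKHR> bins( handles.pipelineBinaryCount );
    handles.setPPipelineBinaries( bins.data() );          // ...then fetch the handles themselves
    (void)device.createPipelineBinariesKHR( &ci, nullptr, &handles );
    return bins;
}
// ----------------------------------------------------------------------------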
VkPipelineBinaryDataInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, pipelineBinary ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryDataInfoKHR const & ) const = default; +#else + bool operator==( PipelineBinaryDataInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pipelineBinary == rhs.pipelineBinary ); +# endif + } + + bool operator!=( PipelineBinaryDataInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineBinaryDataInfoKHR; + void * pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineBinaryKHR pipelineBinary = {}; + }; + + template <> + struct CppType + { + using Type = PipelineBinaryDataInfoKHR; + }; + + struct PipelineBinaryHandlesInfoKHR + { + using NativeType = VkPipelineBinaryHandlesInfoKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineBinaryHandlesInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineBinaryHandlesInfoKHR( uint32_t pipelineBinaryCount_ = {}, + VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , pipelineBinaryCount{ pipelineBinaryCount_ } + , pPipelineBinaries{ pPipelineBinaries_ } + { + } + + VULKAN_HPP_CONSTEXPR PipelineBinaryHandlesInfoKHR( PipelineBinaryHandlesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryHandlesInfoKHR( VkPipelineBinaryHandlesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryHandlesInfoKHR( *reinterpret_cast( &rhs ) ) + { + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryHandlesInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaries_, + const void * pNext_ = nullptr ) + : pNext( pNext_ ), pipelineBinaryCount( static_cast( pipelineBinaries_.size() ) ), pPipelineBinaries( pipelineBinaries_.data() ) + { + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + PipelineBinaryHandlesInfoKHR & operator=( PipelineBinaryHandlesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryHandlesInfoKHR & operator=( VkPipelineBinaryHandlesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryHandlesInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryHandlesInfoKHR & setPipelineBinaryCount( uint32_t pipelineBinaryCount_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBinaryCount = pipelineBinaryCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryHandlesInfoKHR & + setPPipelineBinaries( VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineBinaries = pPipelineBinaries_; + return *this; + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryHandlesInfoKHR & setPipelineBinaries( + 
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaries_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBinaryCount = static_cast( pipelineBinaries_.size() ); + pPipelineBinaries = pipelineBinaries_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryHandlesInfoKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryHandlesInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, pipelineBinaryCount, pPipelineBinaries ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryHandlesInfoKHR const & ) const = default; +#else + bool operator==( PipelineBinaryHandlesInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pipelineBinaryCount == rhs.pipelineBinaryCount ) && + ( pPipelineBinaries == rhs.pPipelineBinaries ); +# endif + } + + bool operator!=( PipelineBinaryHandlesInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineBinaryHandlesInfoKHR; + const void * pNext = {}; + uint32_t pipelineBinaryCount = {}; + VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries = {}; + }; + + template <> + struct CppType + { + using Type = PipelineBinaryHandlesInfoKHR; + }; + + struct PipelineBinaryInfoKHR + { + using NativeType = VkPipelineBinaryInfoKHR; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineBinaryInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineBinaryInfoKHR( uint32_t binaryCount_ = {}, + const VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , binaryCount{ binaryCount_ } + , pPipelineBinaries{ pPipelineBinaries_ } + { + } + + VULKAN_HPP_CONSTEXPR PipelineBinaryInfoKHR( PipelineBinaryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineBinaryInfoKHR( VkPipelineBinaryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineBinaryInfoKHR( *reinterpret_cast( &rhs ) ) + { + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaries_, + const void * pNext_ = nullptr ) + : pNext( pNext_ ), binaryCount( static_cast( pipelineBinaries_.size() ) ), pPipelineBinaries( pipelineBinaries_.data() ) + { + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + PipelineBinaryInfoKHR & operator=( PipelineBinaryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PipelineBinaryInfoKHR & operator=( VkPipelineBinaryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryInfoKHR & setBinaryCount( 
uint32_t binaryCount_ ) VULKAN_HPP_NOEXCEPT + { + binaryCount = binaryCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PipelineBinaryInfoKHR & + setPPipelineBinaries( const VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineBinaries = pPipelineBinaries_; + return *this; + } + +# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) + PipelineBinaryInfoKHR & setPipelineBinaries( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineBinaries_ ) VULKAN_HPP_NOEXCEPT + { + binaryCount = static_cast( pipelineBinaries_.size() ); + pPipelineBinaries = pipelineBinaries_.data(); + return *this; + } +# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPipelineBinaryInfoKHR const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineBinaryInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, binaryCount, pPipelineBinaries ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PipelineBinaryInfoKHR const & ) const = default; +#else + bool operator==( PipelineBinaryInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( binaryCount == rhs.binaryCount ) && ( pPipelineBinaries == rhs.pPipelineBinaries ); +# endif + } + + bool operator!=( PipelineBinaryInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineBinaryInfoKHR; + const void * pNext = {}; + uint32_t binaryCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineBinaryKHR * pPipelineBinaries = {}; + }; + + template <> + struct CppType + { + using Type = PipelineBinaryInfoKHR; + }; + struct PipelineCacheCreateInfo { using NativeType = VkPipelineCacheCreateInfo; @@ -91373,10 +94156,10 @@ namespace VULKAN_HPP_NAMESPACE size_t initialDataSize_ = {}, const void * pInitialData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , initialDataSize( initialDataSize_ ) - , pInitialData( pInitialData_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , initialDataSize{ initialDataSize_ } + , pInitialData{ pInitialData_ } { } @@ -91512,11 +94295,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t vendorID_ = {}, uint32_t deviceID_ = {}, std::array const & pipelineCacheUUID_ = {} ) VULKAN_HPP_NOEXCEPT - : headerSize( headerSize_ ) - , headerVersion( headerVersion_ ) - , vendorID( vendorID_ ) - , deviceID( deviceID_ ) - , pipelineCacheUUID( pipelineCacheUUID_ ) + : headerSize{ headerSize_ } + , headerVersion{ headerVersion_ } + , vendorID{ vendorID_ } + , deviceID{ deviceID_ } + , pipelineCacheUUID{ pipelineCacheUUID_ } { } @@ -91635,10 +94418,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 dstPremultiplied_ = {}, VULKAN_HPP_NAMESPACE::BlendOverlapEXT blendOverlap_ = VULKAN_HPP_NAMESPACE::BlendOverlapEXT::eUncorrelated, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcPremultiplied( srcPremultiplied_ ) - , dstPremultiplied( dstPremultiplied_ ) - , blendOverlap( blendOverlap_ ) + : pNext{ pNext_ } + , srcPremultiplied{ srcPremultiplied_ 
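// --- sketch (not from upstream) --------------------------------------------
// PipelineBinaryInfoKHR closes the loop: chained into a pipeline create info's
// pNext, it lets the driver assemble the pipeline from previously captured
// binaries instead of recompiling. Sketch, with `bins` coming from a capture
// like the one above and `gpci` a hypothetical graphics-pipeline create info
// that outlives the create call together with `binInfo`:
#include <vector>
#include <vulkan/vulkan.hpp>

void AttachBinaries( vk::GraphicsPipelineCreateInfo &           gpci,
                     std::vector<vk::PipelineBinaryKHR> const & bins,
                     vk::PipelineBinaryInfoKHR &                binInfo )
{
    binInfo.setBinaryCount( static_cast<uint32_t>( bins.size() ) )
           .setPPipelineBinaries( bins.data() );
    gpci.setPNext( &binInfo );   // driver reads the binaries from the pNext chain
}
// ----------------------------------------------------------------------------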
} + , dstPremultiplied{ dstPremultiplied_ } + , blendOverlap{ blendOverlap_ } { } @@ -91757,9 +94540,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineColorWriteCreateInfoEXT( uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::Bool32 * pColorWriteEnables_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachmentCount( attachmentCount_ ) - , pColorWriteEnables( pColorWriteEnables_ ) + : pNext{ pNext_ } + , attachmentCount{ attachmentCount_ } + , pColorWriteEnables{ pColorWriteEnables_ } { } @@ -91881,8 +94664,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineCompilerControlCreateInfoAMD( VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagsAMD compilerControlFlags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , compilerControlFlags( compilerControlFlags_ ) + : pNext{ pNext_ } + , compilerControlFlags{ compilerControlFlags_ } { } @@ -91984,12 +94767,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t coverageModulationTableCount_ = {}, const float * pCoverageModulationTable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , coverageModulationMode( coverageModulationMode_ ) - , coverageModulationTableEnable( coverageModulationTableEnable_ ) - , coverageModulationTableCount( coverageModulationTableCount_ ) - , pCoverageModulationTable( pCoverageModulationTable_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , coverageModulationMode{ coverageModulationMode_ } + , coverageModulationTableEnable{ coverageModulationTableEnable_ } + , coverageModulationTableCount{ coverageModulationTableCount_ } + , pCoverageModulationTable{ pCoverageModulationTable_ } { } @@ -92154,9 +94937,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineCoverageReductionStateCreateFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode_ = VULKAN_HPP_NAMESPACE::CoverageReductionModeNV::eMerge, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , coverageReductionMode( coverageReductionMode_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , coverageReductionMode{ coverageReductionMode_ } { } @@ -92266,10 +95049,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 coverageToColorEnable_ = {}, uint32_t coverageToColorLocation_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , coverageToColorEnable( coverageToColorEnable_ ) - , coverageToColorLocation( coverageToColorLocation_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , coverageToColorEnable{ coverageToColorEnable_ } + , coverageToColorLocation{ coverageToColorLocation_ } { } @@ -92386,8 +95169,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineCreateFlags2CreateInfoKHR( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -92480,8 +95263,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineCreationFeedback( VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlags flags_ = {}, uint64_t duration_ = {} ) VULKAN_HPP_NOEXCEPT - : flags( flags_ ) - , duration( duration_ ) + : flags{ flags_ } + , duration{ duration_ } { } @@ -92560,10 +95343,10 @@ namespace VULKAN_HPP_NAMESPACE 
uint32_t pipelineStageCreationFeedbackCount_ = {}, VULKAN_HPP_NAMESPACE::PipelineCreationFeedback * pPipelineStageCreationFeedbacks_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pPipelineCreationFeedback( pPipelineCreationFeedback_ ) - , pipelineStageCreationFeedbackCount( pipelineStageCreationFeedbackCount_ ) - , pPipelineStageCreationFeedbacks( pPipelineStageCreationFeedbacks_ ) + : pNext{ pNext_ } + , pPipelineCreationFeedback{ pPipelineCreationFeedback_ } + , pipelineStageCreationFeedbackCount{ pipelineStageCreationFeedbackCount_ } + , pPipelineStageCreationFeedbacks{ pPipelineStageCreationFeedbacks_ } { } @@ -92712,11 +95495,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t discardRectangleCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pDiscardRectangles_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , discardRectangleMode( discardRectangleMode_ ) - , discardRectangleCount( discardRectangleCount_ ) - , pDiscardRectangles( pDiscardRectangles_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , discardRectangleMode{ discardRectangleMode_ } + , discardRectangleCount{ discardRectangleCount_ } + , pDiscardRectangles{ pDiscardRectangles_ } { } @@ -92867,9 +95650,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineExecutableInfoKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, uint32_t executableIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipeline( pipeline_ ) - , executableIndex( executableIndex_ ) + : pNext{ pNext_ } + , pipeline{ pipeline_ } + , executableIndex{ executableIndex_ } { } @@ -92976,12 +95759,12 @@ namespace VULKAN_HPP_NAMESPACE size_t dataSize_ = {}, void * pData_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , name( name_ ) - , description( description_ ) - , isText( isText_ ) - , dataSize( dataSize_ ) - , pData( pData_ ) + : pNext{ pNext_ } + , name{ name_ } + , description{ description_ } + , isText{ isText_ } + , dataSize{ dataSize_ } + , pData{ pData_ } { } @@ -92993,31 +95776,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - template - PipelineExecutableInternalRepresentationKHR( std::string const & name_, - std::string const & description_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 isText_ = {}, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & data_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ), isText( isText_ ), dataSize( data_.size() * sizeof( T ) ), pData( data_.data() ) - { - VULKAN_HPP_ASSERT( name_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( name, VK_MAX_DESCRIPTION_SIZE, name_.data(), name_.size() ); -# else - strncpy( name, name_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, name_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PipelineExecutableInternalRepresentationKHR & operator=( PipelineExecutableInternalRepresentationKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -93117,11 +95875,11 @@ namespace VULKAN_HPP_NAMESPACE std::array const & description_ = {}, uint32_t subgroupSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : 
pNext( pNext_ ) - , stages( stages_ ) - , name( name_ ) - , description( description_ ) - , subgroupSize( subgroupSize_ ) + : pNext{ pNext_ } + , stages{ stages_ } + , name{ name_ } + , description{ description_ } + , subgroupSize{ subgroupSize_ } { } @@ -93132,30 +95890,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PipelineExecutablePropertiesKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags stages_, - std::string const & name_, - std::string const & description_ = {}, - uint32_t subgroupSize_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ), stages( stages_ ), subgroupSize( subgroupSize_ ) - { - VULKAN_HPP_ASSERT( name_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( name, VK_MAX_DESCRIPTION_SIZE, name_.data(), name_.size() ); -# else - strncpy( name, name_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, name_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PipelineExecutablePropertiesKHR & operator=( PipelineExecutablePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -93315,11 +96049,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR format_ = VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR::eBool32, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR value_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , name( name_ ) - , description( description_ ) - , format( format_ ) - , value( value_ ) + : pNext{ pNext_ } + , name{ name_ } + , description{ description_ } + , format{ format_ } + , value{ value_ } { } @@ -93330,31 +96064,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PipelineExecutableStatisticKHR( - std::string const & name_, - std::string const & description_ = {}, - VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR format_ = VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR::eBool32, - VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR value_ = {}, - void * pNext_ = nullptr ) - : pNext( pNext_ ), format( format_ ), value( value_ ) - { - VULKAN_HPP_ASSERT( name_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( name, VK_MAX_DESCRIPTION_SIZE, name_.data(), name_.size() ); -# else - strncpy( name, name_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, name_.size() ) ); -# endif - - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PipelineExecutableStatisticKHR & operator=( PipelineExecutableStatisticKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -93420,10 +96129,10 @@ namespace VULKAN_HPP_NAMESPACE std::array const & combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }, const void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shadingRateType( shadingRateType_ ) - , shadingRate( shadingRate_ ) - , combinerOps( combinerOps_ ) + : pNext{ pNext_ } + , shadingRateType{ shadingRateType_ } + , shadingRate{ shadingRate_ } + , combinerOps{ combinerOps_ } { } @@ -93545,9 +96254,9 @@ namespace VULKAN_HPP_NAMESPACE std::array const & combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentSize( fragmentSize_ ) - , combinerOps( combinerOps_ ) + : pNext{ pNext_ } + , fragmentSize{ fragmentSize_ } + , combinerOps{ combinerOps_ } { } @@ -93658,9 +96367,9 @@ namespace VULKAN_HPP_NAMESPACE PipelineIndirectDeviceAddressInfoNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , pipeline( pipeline_ ) + : pNext{ pNext_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , pipeline{ pipeline_ } { } @@ -93766,8 +96475,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineInfoKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipeline( pipeline_ ) + : pNext{ pNext_ } + , pipeline{ pipeline_ } { } @@ -93859,9 +96568,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PushConstantRange( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, uint32_t offset_ = {}, uint32_t size_ = {} ) VULKAN_HPP_NOEXCEPT - : stageFlags( stageFlags_ ) - , offset( offset_ ) - , size( size_ ) + : stageFlags{ stageFlags_ } + , offset{ offset_ } + , size{ size_ } { } @@ -93958,12 +96667,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t pushConstantRangeCount_ = {}, const VULKAN_HPP_NAMESPACE::PushConstantRange * pPushConstantRanges_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , setLayoutCount( setLayoutCount_ ) - , pSetLayouts( pSetLayouts_ ) - , pushConstantRangeCount( pushConstantRangeCount_ ) - , pPushConstantRanges( pPushConstantRanges_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , setLayoutCount{ setLayoutCount_ } + , pSetLayouts{ pSetLayouts_ } + , pushConstantRangeCount{ pushConstantRangeCount_ } + , pPushConstantRanges{ pPushConstantRanges_ } { } @@ -94131,8 +96840,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR_14 PipelinePropertiesIdentifierEXT( std::array const & pipelineIdentifier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pipelineIdentifier( pipelineIdentifier_ ) + : pNext{ pNext_ } + , pipelineIdentifier{ pipelineIdentifier_ } { } @@ -94217,10 +96926,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT conservativeRasterizationMode_ = VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT::eDisabled, float extraPrimitiveOverestimationSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , conservativeRasterizationMode( conservativeRasterizationMode_ ) - , extraPrimitiveOverestimationSize( extraPrimitiveOverestimationSize_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , 
conservativeRasterizationMode{ conservativeRasterizationMode_ } + , extraPrimitiveOverestimationSize{ extraPrimitiveOverestimationSize_ } { } @@ -94342,9 +97051,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineRasterizationDepthClipStateCreateInfoEXT( VULKAN_HPP_NAMESPACE::PipelineRasterizationDepthClipStateCreateFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , depthClipEnable( depthClipEnable_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , depthClipEnable{ depthClipEnable_ } { } @@ -94457,11 +97166,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t lineStippleFactor_ = {}, uint16_t lineStipplePattern_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , lineRasterizationMode( lineRasterizationMode_ ) - , stippledLineEnable( stippledLineEnable_ ) - , lineStippleFactor( lineStippleFactor_ ) - , lineStipplePattern( lineStipplePattern_ ) + : pNext{ pNext_ } + , lineRasterizationMode{ lineRasterizationMode_ } + , stippledLineEnable{ stippledLineEnable_ } + , lineStippleFactor{ lineStippleFactor_ } + , lineStipplePattern{ lineStipplePattern_ } { } @@ -94590,8 +97299,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineRasterizationProvokingVertexStateCreateInfoEXT( VULKAN_HPP_NAMESPACE::ProvokingVertexModeEXT provokingVertexMode_ = VULKAN_HPP_NAMESPACE::ProvokingVertexModeEXT::eFirstVertex, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , provokingVertexMode( provokingVertexMode_ ) + : pNext{ pNext_ } + , provokingVertexMode{ provokingVertexMode_ } { } @@ -94692,8 +97401,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineRasterizationStateRasterizationOrderAMD( VULKAN_HPP_NAMESPACE::RasterizationOrderAMD rasterizationOrder_ = VULKAN_HPP_NAMESPACE::RasterizationOrderAMD::eStrict, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , rasterizationOrder( rasterizationOrder_ ) + : pNext{ pNext_ } + , rasterizationOrder{ rasterizationOrder_ } { } @@ -94792,9 +97501,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineRasterizationStateStreamCreateInfoEXT( VULKAN_HPP_NAMESPACE::PipelineRasterizationStateStreamCreateFlagsEXT flags_ = {}, uint32_t rasterizationStream_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , rasterizationStream( rasterizationStream_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , rasterizationStream{ rasterizationStream_ } { } @@ -94906,12 +97615,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , viewMask( viewMask_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachmentFormats( pColorAttachmentFormats_ ) - , depthAttachmentFormat( depthAttachmentFormat_ ) - , stencilAttachmentFormat( stencilAttachmentFormat_ ) + : pNext{ pNext_ } + , viewMask{ viewMask_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentFormats{ pColorAttachmentFormats_ } + , depthAttachmentFormat{ depthAttachmentFormat_ } + , stencilAttachmentFormat{ stencilAttachmentFormat_ } { } @@ -95073,8 +97782,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR 
PipelineRepresentativeFragmentTestStateCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTestEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , representativeFragmentTestEnable( representativeFragmentTestEnable_ ) + : pNext{ pNext_ } + , representativeFragmentTestEnable{ representativeFragmentTestEnable_ } { } @@ -95177,11 +97886,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT vertexInputs_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault, VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT images_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT::eDeviceDefault, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , storageBuffers( storageBuffers_ ) - , uniformBuffers( uniformBuffers_ ) - , vertexInputs( vertexInputs_ ) - , images( images_ ) + : pNext{ pNext_ } + , storageBuffers{ storageBuffers_ } + , uniformBuffers{ uniformBuffers_ } + , vertexInputs{ vertexInputs_ } + , images{ images_ } { } @@ -95308,9 +98017,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineSampleLocationsStateCreateInfoEXT( VULKAN_HPP_NAMESPACE::Bool32 sampleLocationsEnable_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampleLocationsEnable( sampleLocationsEnable_ ) - , sampleLocationsInfo( sampleLocationsInfo_ ) + : pNext{ pNext_ } + , sampleLocationsEnable{ sampleLocationsEnable_ } + , sampleLocationsInfo{ sampleLocationsInfo_ } { } @@ -95420,9 +98129,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineShaderStageModuleIdentifierCreateInfoEXT( uint32_t identifierSize_ = {}, const uint8_t * pIdentifier_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , identifierSize( identifierSize_ ) - , pIdentifier( pIdentifier_ ) + : pNext{ pNext_ } + , identifierSize{ identifierSize_ } + , pIdentifier{ pIdentifier_ } { } @@ -95545,9 +98254,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineShaderStageNodeCreateInfoAMDX( const char * pName_ = {}, uint32_t index_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pName( pName_ ) - , index( index_ ) + : pNext{ pNext_ } + , pName{ pName_ } + , index{ index_ } { } @@ -95659,8 +98368,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineShaderStageRequiredSubgroupSizeCreateInfo( uint32_t requiredSubgroupSize_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , requiredSubgroupSize( requiredSubgroupSize_ ) + : pNext{ pNext_ } + , requiredSubgroupSize{ requiredSubgroupSize_ } { } @@ -95748,8 +98457,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineTessellationDomainOriginStateCreateInfo( VULKAN_HPP_NAMESPACE::TessellationDomainOrigin domainOrigin_ = VULKAN_HPP_NAMESPACE::TessellationDomainOrigin::eUpperLeft, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , domainOrigin( domainOrigin_ ) + : pNext{ pNext_ } + , domainOrigin{ domainOrigin_ } { } @@ -95845,8 +98554,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescriptionKHR( uint32_t binding_ = {}, uint32_t divisor_ = {} ) VULKAN_HPP_NOEXCEPT - : binding( binding_ ) - , divisor( divisor_ ) + : binding{ 
binding_ } + , divisor{ divisor_ } { } @@ -95939,9 +98648,9 @@ namespace VULKAN_HPP_NAMESPACE PipelineVertexInputDivisorStateCreateInfoKHR( uint32_t vertexBindingDivisorCount_ = {}, const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR * pVertexBindingDivisors_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , vertexBindingDivisorCount( vertexBindingDivisorCount_ ) - , pVertexBindingDivisors( pVertexBindingDivisors_ ) + : pNext{ pNext_ } + , vertexBindingDivisorCount{ vertexBindingDivisorCount_ } + , pVertexBindingDivisors{ pVertexBindingDivisors_ } { } @@ -96077,10 +98786,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t customSampleOrderCount_ = {}, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV * pCustomSampleOrders_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampleOrderType( sampleOrderType_ ) - , customSampleOrderCount( customSampleOrderCount_ ) - , pCustomSampleOrders( pCustomSampleOrders_ ) + : pNext{ pNext_ } + , sampleOrderType{ sampleOrderType_ } + , customSampleOrderCount{ customSampleOrderCount_ } + , pCustomSampleOrders{ pCustomSampleOrders_ } { } @@ -96223,8 +98932,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PipelineViewportDepthClipControlCreateInfoEXT( VULKAN_HPP_NAMESPACE::Bool32 negativeOneToOne_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , negativeOneToOne( negativeOneToOne_ ) + : pNext{ pNext_ } + , negativeOneToOne{ negativeOneToOne_ } { } @@ -96323,9 +99032,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PipelineViewportExclusiveScissorStateCreateInfoNV( uint32_t exclusiveScissorCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D * pExclusiveScissors_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , exclusiveScissorCount( exclusiveScissorCount_ ) - , pExclusiveScissors( pExclusiveScissors_ ) + : pNext{ pNext_ } + , exclusiveScissorCount{ exclusiveScissorCount_ } + , pExclusiveScissors{ pExclusiveScissors_ } { } @@ -96447,8 +99156,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ShadingRatePaletteNV( uint32_t shadingRatePaletteEntryCount_ = {}, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV * pShadingRatePaletteEntries_ = {} ) VULKAN_HPP_NOEXCEPT - : shadingRatePaletteEntryCount( shadingRatePaletteEntryCount_ ) - , pShadingRatePaletteEntries( pShadingRatePaletteEntries_ ) + : shadingRatePaletteEntryCount{ shadingRatePaletteEntryCount_ } + , pShadingRatePaletteEntries{ pShadingRatePaletteEntries_ } { } @@ -96560,10 +99269,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV * pShadingRatePalettes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , shadingRateImageEnable( shadingRateImageEnable_ ) - , viewportCount( viewportCount_ ) - , pShadingRatePalettes( pShadingRatePalettes_ ) + : pNext{ pNext_ } + , shadingRateImageEnable{ shadingRateImageEnable_ } + , viewportCount{ viewportCount_ } + , pShadingRatePalettes{ pShadingRatePalettes_ } { } @@ -96705,10 +99414,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV y_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX, VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV z_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX, VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV w_ = 
VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX ) VULKAN_HPP_NOEXCEPT - : x( x_ ) - , y( y_ ) - , z( z_ ) - , w( w_ ) + : x{ x_ } + , y{ y_ } + , z{ z_ } + , w{ w_ } { } @@ -96813,10 +99522,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ViewportSwizzleNV * pViewportSwizzles_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , viewportCount( viewportCount_ ) - , pViewportSwizzles( pViewportSwizzles_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , viewportCount{ viewportCount_ } + , pViewportSwizzles{ pViewportSwizzles_ } { } @@ -96949,8 +99658,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ViewportWScalingNV( float xcoeff_ = {}, float ycoeff_ = {} ) VULKAN_HPP_NOEXCEPT - : xcoeff( xcoeff_ ) - , ycoeff( ycoeff_ ) + : xcoeff{ xcoeff_ } + , ycoeff{ ycoeff_ } { } @@ -97038,10 +99747,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV * pViewportWScalings_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , viewportWScalingEnable( viewportWScalingEnable_ ) - , viewportCount( viewportCount_ ) - , pViewportWScalings( pViewportWScalings_ ) + : pNext{ pNext_ } + , viewportWScalingEnable{ viewportWScalingEnable_ } + , viewportCount{ viewportCount_ } + , pViewportWScalings{ pViewportWScalings_ } { } @@ -97181,8 +99890,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PresentFrameTokenGGP( GgpFrameToken frameToken_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , frameToken( frameToken_ ) + : pNext{ pNext_ } + , frameToken{ frameToken_ } { } @@ -97284,9 +99993,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PresentIdKHR( uint32_t swapchainCount_ = {}, const uint64_t * pPresentIds_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pPresentIds( pPresentIds_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pPresentIds{ pPresentIds_ } { } @@ -97407,13 +100116,13 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pImageIndices_ = {}, VULKAN_HPP_NAMESPACE::Result * pResults_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreCount( waitSemaphoreCount_ ) - , pWaitSemaphores( pWaitSemaphores_ ) - , swapchainCount( swapchainCount_ ) - , pSwapchains( pSwapchains_ ) - , pImageIndices( pImageIndices_ ) - , pResults( pResults_ ) + : pNext{ pNext_ } + , waitSemaphoreCount{ waitSemaphoreCount_ } + , pWaitSemaphores{ pWaitSemaphores_ } + , swapchainCount{ swapchainCount_ } + , pSwapchains{ pSwapchains_ } + , pImageIndices{ pImageIndices_ } + , pResults{ pResults_ } { } @@ -97620,9 +100329,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RectLayerKHR( VULKAN_HPP_NAMESPACE::Offset2D offset_ = {}, VULKAN_HPP_NAMESPACE::Extent2D extent_ = {}, uint32_t layer_ = {} ) VULKAN_HPP_NOEXCEPT - : offset( offset_ ) - , extent( extent_ ) - , layer( layer_ ) + : offset{ offset_ } + , extent{ extent_ } + , layer{ layer_ } { } @@ -97713,8 +100422,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PresentRegionKHR( uint32_t rectangleCount_ = {}, const VULKAN_HPP_NAMESPACE::RectLayerKHR * 
pRectangles_ = {} ) VULKAN_HPP_NOEXCEPT - : rectangleCount( rectangleCount_ ) - , pRectangles( pRectangles_ ) + : rectangleCount{ rectangleCount_ } + , pRectangles{ pRectangles_ } { } @@ -97818,9 +100527,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PresentRegionsKHR( uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentRegionKHR * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pRegions{ pRegions_ } { } @@ -97934,8 +100643,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PresentTimeGOOGLE( uint32_t presentID_ = {}, uint64_t desiredPresentTime_ = {} ) VULKAN_HPP_NOEXCEPT - : presentID( presentID_ ) - , desiredPresentTime( desiredPresentTime_ ) + : presentID{ presentID_ } + , desiredPresentTime{ desiredPresentTime_ } { } @@ -98022,9 +100731,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR PresentTimesInfoGOOGLE( uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentTimeGOOGLE * pTimes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pTimes( pTimes_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pTimes{ pTimes_ } { } @@ -98145,8 +100854,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR PrivateDataSlotCreateInfo( VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlags flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -98243,8 +100952,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ProtectedSubmitInfo( VULKAN_HPP_NAMESPACE::Bool32 protectedSubmit_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , protectedSubmit( protectedSubmit_ ) + : pNext{ pNext_ } + , protectedSubmit{ protectedSubmit_ } { } @@ -98343,12 +101052,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t size_ = {}, const void * pValues_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , layout( layout_ ) - , stageFlags( stageFlags_ ) - , offset( offset_ ) - , size( size_ ) - , pValues( pValues_ ) + : pNext{ pNext_ } + , layout{ layout_ } + , stageFlags{ stageFlags_ } + , offset{ offset_ } + , size{ size_ } + , pValues{ pValues_ } { } @@ -98513,15 +101222,15 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::DescriptorBufferInfo * pBufferInfo_ = {}, const VULKAN_HPP_NAMESPACE::BufferView * pTexelBufferView_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dstSet( dstSet_ ) - , dstBinding( dstBinding_ ) - , dstArrayElement( dstArrayElement_ ) - , descriptorCount( descriptorCount_ ) - , descriptorType( descriptorType_ ) - , pImageInfo( pImageInfo_ ) - , pBufferInfo( pBufferInfo_ ) - , pTexelBufferView( pTexelBufferView_ ) + : pNext{ pNext_ } + , dstSet{ dstSet_ } + , dstBinding{ dstBinding_ } + , dstArrayElement{ dstArrayElement_ } + , descriptorCount{ descriptorCount_ } + , descriptorType{ descriptorType_ } + , pImageInfo{ pImageInfo_ } + , pBufferInfo{ pBufferInfo_ } + , pTexelBufferView{ pTexelBufferView_ } { } @@ -98741,12 +101450,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t descriptorWriteCount_ = {}, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites_ = 
{}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stageFlags( stageFlags_ ) - , layout( layout_ ) - , set( set_ ) - , descriptorWriteCount( descriptorWriteCount_ ) - , pDescriptorWrites( pDescriptorWrites_ ) + : pNext{ pNext_ } + , stageFlags{ stageFlags_ } + , layout{ layout_ } + , set{ set_ } + , descriptorWriteCount{ descriptorWriteCount_ } + , pDescriptorWrites{ pDescriptorWrites_ } { } @@ -98907,11 +101616,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t set_ = {}, const void * pData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , descriptorUpdateTemplate( descriptorUpdateTemplate_ ) - , layout( layout_ ) - , set( set_ ) - , pData( pData_ ) + : pNext{ pNext_ } + , descriptorUpdateTemplate{ descriptorUpdateTemplate_ } + , layout{ layout_ } + , set{ set_ } + , pData{ pData_ } { } @@ -99034,8 +101743,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueryLowLatencySupportNV( void * pQueriedLowLatencyData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pQueriedLowLatencyData( pQueriedLowLatencyData_ ) + : pNext{ pNext_ } + , pQueriedLowLatencyData{ pQueriedLowLatencyData_ } { } @@ -99134,11 +101843,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queryCount_ = {}, VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , queryType( queryType_ ) - , queryCount( queryCount_ ) - , pipelineStatistics( pipelineStatistics_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , queryType{ queryType_ } + , queryCount{ queryCount_ } + , pipelineStatistics{ pipelineStatistics_ } { } @@ -99263,10 +101972,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t counterIndexCount_ = {}, const uint32_t * pCounterIndices_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queueFamilyIndex( queueFamilyIndex_ ) - , counterIndexCount( counterIndexCount_ ) - , pCounterIndices( pCounterIndices_ ) + : pNext{ pNext_ } + , queueFamilyIndex{ queueFamilyIndex_ } + , counterIndexCount{ counterIndexCount_ } + , pCounterIndices{ pCounterIndices_ } { } @@ -99400,8 +102109,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR QueryPoolPerformanceQueryCreateInfoINTEL( VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL performanceCountersSampling_ = VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL::eManual, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , performanceCountersSampling( performanceCountersSampling_ ) + : pNext{ pNext_ } + , performanceCountersSampling{ performanceCountersSampling_ } { } @@ -99500,8 +102209,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueryPoolVideoEncodeFeedbackCreateInfoKHR( VULKAN_HPP_NAMESPACE::VideoEncodeFeedbackFlagsKHR encodeFeedbackFlags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , encodeFeedbackFlags( encodeFeedbackFlags_ ) + : pNext{ pNext_ } + , encodeFeedbackFlags{ encodeFeedbackFlags_ } { } @@ -99598,8 +102307,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueueFamilyCheckpointProperties2NV( VULKAN_HPP_NAMESPACE::PipelineStageFlags2 checkpointExecutionStageMask_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , checkpointExecutionStageMask( checkpointExecutionStageMask_ ) + : pNext{ pNext_ } + , 
checkpointExecutionStageMask{ checkpointExecutionStageMask_ } { } @@ -99681,8 +102390,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueueFamilyCheckpointPropertiesNV( VULKAN_HPP_NAMESPACE::PipelineStageFlags checkpointExecutionStageMask_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , checkpointExecutionStageMask( checkpointExecutionStageMask_ ) + : pNext{ pNext_ } + , checkpointExecutionStageMask{ checkpointExecutionStageMask_ } { } @@ -99782,9 +102491,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow } }, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , priorityCount( priorityCount_ ) - , priorities( priorities_ ) + : pNext{ pNext_ } + , priorityCount{ priorityCount_ } + , priorities{ priorities_ } { } @@ -99795,16 +102504,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - QueueFamilyGlobalPriorityPropertiesKHR( VULKAN_HPP_NAMESPACE::ArrayProxy const & priorities_, - void * pNext_ = nullptr ) - : pNext( pNext_ ), priorityCount( std::min( static_cast( priorities_.size() ), VK_MAX_GLOBAL_PRIORITY_SIZE_KHR ) ) - { - VULKAN_HPP_ASSERT( priorities_.size() < VK_MAX_GLOBAL_PRIORITY_SIZE_KHR ); - memcpy( priorities, priorities_.data(), priorityCount * sizeof( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR ) ); - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - QueueFamilyGlobalPriorityPropertiesKHR & operator=( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -99893,10 +102592,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t queueCount_ = {}, uint32_t timestampValidBits_ = {}, VULKAN_HPP_NAMESPACE::Extent3D minImageTransferGranularity_ = {} ) VULKAN_HPP_NOEXCEPT - : queueFlags( queueFlags_ ) - , queueCount( queueCount_ ) - , timestampValidBits( timestampValidBits_ ) - , minImageTransferGranularity( minImageTransferGranularity_ ) + : queueFlags{ queueFlags_ } + , queueCount{ queueCount_ } + , timestampValidBits{ timestampValidBits_ } + , minImageTransferGranularity{ minImageTransferGranularity_ } { } @@ -99974,8 +102673,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueueFamilyProperties2( VULKAN_HPP_NAMESPACE::QueueFamilyProperties queueFamilyProperties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queueFamilyProperties( queueFamilyProperties_ ) + : pNext{ pNext_ } + , queueFamilyProperties{ queueFamilyProperties_ } { } @@ -100059,8 +102758,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueueFamilyQueryResultStatusPropertiesKHR( VULKAN_HPP_NAMESPACE::Bool32 queryResultStatusSupport_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queryResultStatusSupport( queryResultStatusSupport_ ) + : pNext{ pNext_ } + , queryResultStatusSupport{ queryResultStatusSupport_ } { } @@ -100142,8 +102841,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR QueueFamilyVideoPropertiesKHR( VULKAN_HPP_NAMESPACE::VideoCodecOperationFlagsKHR videoCodecOperations_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoCodecOperations( videoCodecOperations_ ) + : pNext{ pNext_ } + , videoCodecOperations{ videoCodecOperations_ } { } @@ -100231,13 +102930,13 
@@ namespace VULKAN_HPP_NAMESPACE uint32_t intersectionShader_ = VULKAN_HPP_NAMESPACE::ShaderUnusedKHR, const void * pShaderGroupCaptureReplayHandle_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , generalShader( generalShader_ ) - , closestHitShader( closestHitShader_ ) - , anyHitShader( anyHitShader_ ) - , intersectionShader( intersectionShader_ ) - , pShaderGroupCaptureReplayHandle( pShaderGroupCaptureReplayHandle_ ) + : pNext{ pNext_ } + , type{ type_ } + , generalShader{ generalShader_ } + , closestHitShader{ closestHitShader_ } + , anyHitShader{ anyHitShader_ } + , intersectionShader{ intersectionShader_ } + , pShaderGroupCaptureReplayHandle{ pShaderGroupCaptureReplayHandle_ } { } @@ -100379,9 +103078,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR( uint32_t maxPipelineRayPayloadSize_ = {}, uint32_t maxPipelineRayHitAttributeSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxPipelineRayPayloadSize( maxPipelineRayPayloadSize_ ) - , maxPipelineRayHitAttributeSize( maxPipelineRayHitAttributeSize_ ) + : pNext{ pNext_ } + , maxPipelineRayPayloadSize{ maxPipelineRayPayloadSize_ } + , maxPipelineRayHitAttributeSize{ maxPipelineRayHitAttributeSize_ } { } @@ -100497,19 +103196,19 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stageCount( stageCount_ ) - , pStages( pStages_ ) - , groupCount( groupCount_ ) - , pGroups( pGroups_ ) - , maxPipelineRayRecursionDepth( maxPipelineRayRecursionDepth_ ) - , pLibraryInfo( pLibraryInfo_ ) - , pLibraryInterface( pLibraryInterface_ ) - , pDynamicState( pDynamicState_ ) - , layout( layout_ ) - , basePipelineHandle( basePipelineHandle_ ) - , basePipelineIndex( basePipelineIndex_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stageCount{ stageCount_ } + , pStages{ pStages_ } + , groupCount{ groupCount_ } + , pGroups{ pGroups_ } + , maxPipelineRayRecursionDepth{ maxPipelineRayRecursionDepth_ } + , pLibraryInfo{ pLibraryInfo_ } + , pLibraryInterface{ pLibraryInterface_ } + , pDynamicState{ pDynamicState_ } + , layout{ layout_ } + , basePipelineHandle{ basePipelineHandle_ } + , basePipelineIndex{ basePipelineIndex_ } { } @@ -100771,12 +103470,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t anyHitShader_ = VULKAN_HPP_NAMESPACE::ShaderUnusedNV, uint32_t intersectionShader_ = VULKAN_HPP_NAMESPACE::ShaderUnusedNV, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , type( type_ ) - , generalShader( generalShader_ ) - , closestHitShader( closestHitShader_ ) - , anyHitShader( anyHitShader_ ) - , intersectionShader( intersectionShader_ ) + : pNext{ pNext_ } + , type{ type_ } + , generalShader{ generalShader_ } + , closestHitShader{ closestHitShader_ } + , anyHitShader{ anyHitShader_ } + , intersectionShader{ intersectionShader_ } { } @@ -100915,16 +103614,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stageCount( stageCount_ ) - , pStages( pStages_ ) - , groupCount( groupCount_ ) - , pGroups( pGroups_ ) - , maxRecursionDepth( maxRecursionDepth_ ) - , layout( layout_ ) - , basePipelineHandle( basePipelineHandle_ ) - , basePipelineIndex( basePipelineIndex_ ) + : pNext{ 
pNext_ }
+      , flags{ flags_ }
+      , stageCount{ stageCount_ }
+      , pStages{ pStages_ }
+      , groupCount{ groupCount_ }
+      , pGroups{ pGroups_ }
+      , maxRecursionDepth{ maxRecursionDepth_ }
+      , layout{ layout_ }
+      , basePipelineHandle{ basePipelineHandle_ }
+      , basePipelineIndex{ basePipelineIndex_ }
     {
     }
@@ -101129,7 +103828,7 @@ namespace VULKAN_HPP_NAMESPACE
     using NativeType = VkRefreshCycleDurationGOOGLE;

 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
-    VULKAN_HPP_CONSTEXPR RefreshCycleDurationGOOGLE( uint64_t refreshDuration_ = {} ) VULKAN_HPP_NOEXCEPT : refreshDuration( refreshDuration_ ) {}
+    VULKAN_HPP_CONSTEXPR RefreshCycleDurationGOOGLE( uint64_t refreshDuration_ = {} ) VULKAN_HPP_NOEXCEPT : refreshDuration{ refreshDuration_ } {}

     VULKAN_HPP_CONSTEXPR RefreshCycleDurationGOOGLE( RefreshCycleDurationGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT = default;
@@ -101191,6 +103890,102 @@ namespace VULKAN_HPP_NAMESPACE
     uint64_t refreshDuration = {};
   };

+  struct ReleaseCapturedPipelineDataInfoKHR
+  {
+    using NativeType = VkReleaseCapturedPipelineDataInfoKHR;
+
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eReleaseCapturedPipelineDataInfoKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ReleaseCapturedPipelineDataInfoKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+      : pNext{ pNext_ }
+      , pipeline{ pipeline_ }
+    {
+    }
+
+    VULKAN_HPP_CONSTEXPR ReleaseCapturedPipelineDataInfoKHR( ReleaseCapturedPipelineDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ReleaseCapturedPipelineDataInfoKHR( VkReleaseCapturedPipelineDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+      : ReleaseCapturedPipelineDataInfoKHR( *reinterpret_cast<ReleaseCapturedPipelineDataInfoKHR const *>( &rhs ) )
+    {
+    }
+
+    ReleaseCapturedPipelineDataInfoKHR & operator=( ReleaseCapturedPipelineDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
+
+    ReleaseCapturedPipelineDataInfoKHR & operator=( VkReleaseCapturedPipelineDataInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ReleaseCapturedPipelineDataInfoKHR const *>( &rhs );
+      return *this;
+    }
+
+#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
+    VULKAN_HPP_CONSTEXPR_14 ReleaseCapturedPipelineDataInfoKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    VULKAN_HPP_CONSTEXPR_14 ReleaseCapturedPipelineDataInfoKHR & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pipeline = pipeline_;
+      return *this;
+    }
+#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
+
+    operator VkReleaseCapturedPipelineDataInfoKHR const &() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkReleaseCapturedPipelineDataInfoKHR *>( this );
+    }
+
+    operator VkReleaseCapturedPipelineDataInfoKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkReleaseCapturedPipelineDataInfoKHR *>( this );
+    }
+
+#if defined( VULKAN_HPP_USE_REFLECT )
+#  if 14 <= VULKAN_HPP_CPP_VERSION
+    auto
+#  else
+    std::tuple<VULKAN_HPP_NAMESPACE::StructureType const &, void * const &, VULKAN_HPP_NAMESPACE::Pipeline const &>
+#  endif
+      reflect() const VULKAN_HPP_NOEXCEPT
+    {
+      return std::tie( sType, pNext, pipeline );
+    }
+#endif
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>( ReleaseCapturedPipelineDataInfoKHR const & ) const = default;
+#else
+    bool operator==( ReleaseCapturedPipelineDataInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+#  if defined( VULKAN_HPP_USE_REFLECT )
+      return this->reflect() == rhs.reflect();
+#  else
+      return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( pipeline == rhs.pipeline );
+#  endif
+    }
+
+    bool operator!=( ReleaseCapturedPipelineDataInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::StructureType sType    = StructureType::eReleaseCapturedPipelineDataInfoKHR;
+    void *                              pNext    = {};
+    VULKAN_HPP_NAMESPACE::Pipeline      pipeline = {};
+  };
+
+  template <>
+  struct CppType<StructureType, StructureType::eReleaseCapturedPipelineDataInfoKHR>
+  {
+    using Type = ReleaseCapturedPipelineDataInfoKHR;
+  };
+
   struct ReleaseSwapchainImagesInfoEXT
   {
     using NativeType = VkReleaseSwapchainImagesInfoEXT;
@@ -101203,10 +103998,10 @@ namespace VULKAN_HPP_NAMESPACE
                                                    uint32_t         imageIndexCount_ = {},
                                                    const uint32_t * pImageIndices_   = {},
                                                    const void *     pNext_           = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , swapchain( swapchain_ )
-      , imageIndexCount( imageIndexCount_ )
-      , pImageIndices( pImageIndices_ )
+      : pNext{ pNext_ }
+      , swapchain{ swapchain_ }
+      , imageIndexCount{ imageIndexCount_ }
+      , pImageIndices{ pImageIndices_ }
     {
     }
@@ -101340,9 +104135,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR RenderPassAttachmentBeginInfo( uint32_t                                attachmentCount_ = {},
                                                         const VULKAN_HPP_NAMESPACE::ImageView * pAttachments_    = {},
                                                         const void *                            pNext_           = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , attachmentCount( attachmentCount_ )
-      , pAttachments( pAttachments_ )
+      : pNext{ pNext_ }
+      , attachmentCount{ attachmentCount_ }
+      , pAttachments{ pAttachments_ }
     {
     }
@@ -101469,12 +104264,12 @@ namespace VULKAN_HPP_NAMESPACE
                                              uint32_t                                 clearValueCount_ = {},
                                              const VULKAN_HPP_NAMESPACE::ClearValue * pClearValues_    = {},
                                              const void *                             pNext_           = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , renderPass( renderPass_ )
-      , framebuffer( framebuffer_ )
-      , renderArea( renderArea_ )
-      , clearValueCount( clearValueCount_ )
-      , pClearValues( pClearValues_ )
+      : pNext{ pNext_ }
+      , renderPass{ renderPass_ }
+      , framebuffer{ framebuffer_ }
+      , renderArea{ renderArea_ }
+      , clearValueCount{ clearValueCount_ }
+      , pClearValues{ pClearValues_ }
     {
     }
@@ -101635,16 +104430,16 @@ namespace VULKAN_HPP_NAMESPACE
                                        const VULKAN_HPP_NAMESPACE::AttachmentReference * pDepthStencilAttachment_ = {},
                                        uint32_t                                          preserveAttachmentCount_ = {},
                                        const uint32_t *                                  pPreserveAttachments_    = {} ) VULKAN_HPP_NOEXCEPT
-      : flags( flags_ )
-      , pipelineBindPoint( pipelineBindPoint_ )
-      , inputAttachmentCount( inputAttachmentCount_ )
-      , pInputAttachments( pInputAttachments_ )
-      , colorAttachmentCount( colorAttachmentCount_ )
-      , pColorAttachments( pColorAttachments_ )
-      , pResolveAttachments( pResolveAttachments_ )
-      , pDepthStencilAttachment( pDepthStencilAttachment_ )
-      , preserveAttachmentCount( preserveAttachmentCount_ )
-      , pPreserveAttachments( pPreserveAttachments_ )
+      : flags{ flags_ }
+      , pipelineBindPoint{ pipelineBindPoint_ }
+      , inputAttachmentCount{ inputAttachmentCount_ }
+      , pInputAttachments{ pInputAttachments_ }
+      , colorAttachmentCount{ colorAttachmentCount_ }
+      , pColorAttachments{ pColorAttachments_ }
+      , pResolveAttachments{ pResolveAttachments_ }
+      , pDepthStencilAttachment{ pDepthStencilAttachment_ }
+      , preserveAttachmentCount{ preserveAttachmentCount_ }
+      , pPreserveAttachments{ pPreserveAttachments_ }
     {
     }
@@ -101886,13 +104681,13 @@ namespace VULKAN_HPP_NAMESPACE
                                       VULKAN_HPP_NAMESPACE::AccessFlags     srcAccessMask_   = {},
                                       VULKAN_HPP_NAMESPACE::AccessFlags     dstAccessMask_   = {},
                                       VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ = {} ) VULKAN_HPP_NOEXCEPT
-      : srcSubpass( srcSubpass_ )
-      , dstSubpass( dstSubpass_ )
-      , srcStageMask( srcStageMask_ )
-      , dstStageMask( dstStageMask_ )
-      , srcAccessMask( srcAccessMask_ )
-      ,
dstAccessMask( dstAccessMask_ ) - , dependencyFlags( dependencyFlags_ ) + : srcSubpass{ srcSubpass_ } + , dstSubpass{ dstSubpass_ } + , srcStageMask{ srcStageMask_ } + , dstStageMask{ dstStageMask_ } + , srcAccessMask{ srcAccessMask_ } + , dstAccessMask{ dstAccessMask_ } + , dependencyFlags{ dependencyFlags_ } { } @@ -102027,14 +104822,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t dependencyCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassDependency * pDependencies_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , attachmentCount( attachmentCount_ ) - , pAttachments( pAttachments_ ) - , subpassCount( subpassCount_ ) - , pSubpasses( pSubpasses_ ) - , dependencyCount( dependencyCount_ ) - , pDependencies( pDependencies_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , attachmentCount{ attachmentCount_ } + , pAttachments{ pAttachments_ } + , subpassCount{ subpassCount_ } + , pSubpasses{ pSubpasses_ } + , dependencyCount{ dependencyCount_ } + , pDependencies{ pDependencies_ } { } @@ -102240,18 +105035,18 @@ namespace VULKAN_HPP_NAMESPACE uint32_t preserveAttachmentCount_ = {}, const uint32_t * pPreserveAttachments_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , pipelineBindPoint( pipelineBindPoint_ ) - , viewMask( viewMask_ ) - , inputAttachmentCount( inputAttachmentCount_ ) - , pInputAttachments( pInputAttachments_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachments( pColorAttachments_ ) - , pResolveAttachments( pResolveAttachments_ ) - , pDepthStencilAttachment( pDepthStencilAttachment_ ) - , preserveAttachmentCount( preserveAttachmentCount_ ) - , pPreserveAttachments( pPreserveAttachments_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , pipelineBindPoint{ pipelineBindPoint_ } + , viewMask{ viewMask_ } + , inputAttachmentCount{ inputAttachmentCount_ } + , pInputAttachments{ pInputAttachments_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachments{ pColorAttachments_ } + , pResolveAttachments{ pResolveAttachments_ } + , pDepthStencilAttachment{ pDepthStencilAttachment_ } + , preserveAttachmentCount{ preserveAttachmentCount_ } + , pPreserveAttachments{ pPreserveAttachments_ } { } @@ -102533,15 +105328,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ = {}, int32_t viewOffset_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcSubpass( srcSubpass_ ) - , dstSubpass( dstSubpass_ ) - , srcStageMask( srcStageMask_ ) - , dstStageMask( dstStageMask_ ) - , srcAccessMask( srcAccessMask_ ) - , dstAccessMask( dstAccessMask_ ) - , dependencyFlags( dependencyFlags_ ) - , viewOffset( viewOffset_ ) + : pNext{ pNext_ } + , srcSubpass{ srcSubpass_ } + , dstSubpass{ dstSubpass_ } + , srcStageMask{ srcStageMask_ } + , dstStageMask{ dstStageMask_ } + , srcAccessMask{ srcAccessMask_ } + , dstAccessMask{ dstAccessMask_ } + , dependencyFlags{ dependencyFlags_ } + , viewOffset{ viewOffset_ } { } @@ -102704,16 +105499,16 @@ namespace VULKAN_HPP_NAMESPACE uint32_t correlatedViewMaskCount_ = {}, const uint32_t * pCorrelatedViewMasks_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , attachmentCount( attachmentCount_ ) - , pAttachments( pAttachments_ ) - , subpassCount( subpassCount_ ) - , pSubpasses( pSubpasses_ ) - , dependencyCount( dependencyCount_ ) - , pDependencies( pDependencies_ ) - , correlatedViewMaskCount( correlatedViewMaskCount_ ) - , 
pCorrelatedViewMasks( pCorrelatedViewMasks_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , attachmentCount{ attachmentCount_ } + , pAttachments{ pAttachments_ } + , subpassCount{ subpassCount_ } + , pSubpasses{ pSubpasses_ } + , dependencyCount{ dependencyCount_ } + , pDependencies{ pDependencies_ } + , correlatedViewMaskCount{ correlatedViewMaskCount_ } + , pCorrelatedViewMasks{ pCorrelatedViewMasks_ } { } @@ -102950,8 +105745,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RenderPassCreationControlEXT( VULKAN_HPP_NAMESPACE::Bool32 disallowMerging_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , disallowMerging( disallowMerging_ ) + : pNext{ pNext_ } + , disallowMerging{ disallowMerging_ } { } @@ -103043,7 +105838,7 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RenderPassCreationFeedbackInfoEXT( uint32_t postMergeSubpassCount_ = {} ) VULKAN_HPP_NOEXCEPT - : postMergeSubpassCount( postMergeSubpassCount_ ) + : postMergeSubpassCount{ postMergeSubpassCount_ } { } @@ -103117,8 +105912,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RenderPassCreationFeedbackCreateInfoEXT( VULKAN_HPP_NAMESPACE::RenderPassCreationFeedbackInfoEXT * pRenderPassFeedback_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pRenderPassFeedback( pRenderPassFeedback_ ) + : pNext{ pNext_ } + , pRenderPassFeedback{ pRenderPassFeedback_ } { } @@ -103215,8 +106010,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RenderPassFragmentDensityMapCreateInfoEXT( VULKAN_HPP_NAMESPACE::AttachmentReference fragmentDensityMapAttachment_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityMapAttachment( fragmentDensityMapAttachment_ ) + : pNext{ pNext_ } + , fragmentDensityMapAttachment{ fragmentDensityMapAttachment_ } { } @@ -103314,9 +106109,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RenderPassInputAttachmentAspectCreateInfo( uint32_t aspectReferenceCount_ = {}, const VULKAN_HPP_NAMESPACE::InputAttachmentAspectReference * pAspectReferences_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , aspectReferenceCount( aspectReferenceCount_ ) - , pAspectReferences( pAspectReferences_ ) + : pNext{ pNext_ } + , aspectReferenceCount{ aspectReferenceCount_ } + , pAspectReferences{ pAspectReferences_ } { } @@ -103450,13 +106245,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t correlationMaskCount_ = {}, const uint32_t * pCorrelationMasks_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subpassCount( subpassCount_ ) - , pViewMasks( pViewMasks_ ) - , dependencyCount( dependencyCount_ ) - , pViewOffsets( pViewOffsets_ ) - , correlationMaskCount( correlationMaskCount_ ) - , pCorrelationMasks( pCorrelationMasks_ ) + : pNext{ pNext_ } + , subpassCount{ subpassCount_ } + , pViewMasks{ pViewMasks_ } + , dependencyCount{ dependencyCount_ } + , pViewOffsets{ pViewOffsets_ } + , correlationMaskCount{ correlationMaskCount_ } + , pCorrelationMasks{ pCorrelationMasks_ } { } @@ -103639,8 +106434,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SubpassSampleLocationsEXT( uint32_t subpassIndex_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {} ) VULKAN_HPP_NOEXCEPT - 
: subpassIndex( subpassIndex_ ) - , sampleLocationsInfo( sampleLocationsInfo_ ) + : subpassIndex{ subpassIndex_ } + , sampleLocationsInfo{ sampleLocationsInfo_ } { } @@ -103734,11 +106529,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t postSubpassSampleLocationsCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassSampleLocationsEXT * pPostSubpassSampleLocations_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , attachmentInitialSampleLocationsCount( attachmentInitialSampleLocationsCount_ ) - , pAttachmentInitialSampleLocations( pAttachmentInitialSampleLocations_ ) - , postSubpassSampleLocationsCount( postSubpassSampleLocationsCount_ ) - , pPostSubpassSampleLocations( pPostSubpassSampleLocations_ ) + : pNext{ pNext_ } + , attachmentInitialSampleLocationsCount{ attachmentInitialSampleLocationsCount_ } + , pAttachmentInitialSampleLocations{ pAttachmentInitialSampleLocations_ } + , postSubpassSampleLocationsCount{ postSubpassSampleLocationsCount_ } + , pPostSubpassSampleLocations{ pPostSubpassSampleLocations_ } { } @@ -103902,8 +106697,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR RenderPassStripeInfoARM( VULKAN_HPP_NAMESPACE::Rect2D stripeArea_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stripeArea( stripeArea_ ) + : pNext{ pNext_ } + , stripeArea{ stripeArea_ } { } @@ -104000,9 +106795,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RenderPassStripeBeginInfoARM( uint32_t stripeInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::RenderPassStripeInfoARM * pStripeInfos_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stripeInfoCount( stripeInfoCount_ ) - , pStripeInfos( pStripeInfos_ ) + : pNext{ pNext_ } + , stripeInfoCount{ stripeInfoCount_ } + , pStripeInfos{ pStripeInfos_ } { } @@ -104128,11 +106923,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PipelineStageFlags2 stageMask_ = {}, uint32_t deviceIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , value( value_ ) - , stageMask( stageMask_ ) - , deviceIndex( deviceIndex_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , value{ value_ } + , stageMask{ stageMask_ } + , deviceIndex{ deviceIndex_ } { } @@ -104257,9 +107052,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RenderPassStripeSubmitInfoARM( uint32_t stripeSemaphoreInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::SemaphoreSubmitInfo * pStripeSemaphoreInfos_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stripeSemaphoreInfoCount( stripeSemaphoreInfoCount_ ) - , pStripeSemaphoreInfos( pStripeSemaphoreInfos_ ) + : pNext{ pNext_ } + , stripeSemaphoreInfoCount{ stripeSemaphoreInfoCount_ } + , pStripeSemaphoreInfos{ pStripeSemaphoreInfos_ } { } @@ -104383,9 +107178,9 @@ namespace VULKAN_HPP_NAMESPACE RenderPassSubpassFeedbackInfoEXT( VULKAN_HPP_NAMESPACE::SubpassMergeStatusEXT subpassMergeStatus_ = VULKAN_HPP_NAMESPACE::SubpassMergeStatusEXT::eMerged, std::array const & description_ = {}, uint32_t postMergeIndex_ = {} ) VULKAN_HPP_NOEXCEPT - : subpassMergeStatus( subpassMergeStatus_ ) - , description( description_ ) - , postMergeIndex( postMergeIndex_ ) + : subpassMergeStatus{ subpassMergeStatus_ } + , description{ description_ } + , postMergeIndex{ postMergeIndex_ } { } @@ -104396,21 +107191,6 @@ namespace VULKAN_HPP_NAMESPACE { } -# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderPassSubpassFeedbackInfoEXT( 
VULKAN_HPP_NAMESPACE::SubpassMergeStatusEXT subpassMergeStatus_, - std::string const & description_, - uint32_t postMergeIndex_ = {} ) - : subpassMergeStatus( subpassMergeStatus_ ), postMergeIndex( postMergeIndex_ ) - { - VULKAN_HPP_ASSERT( description_.size() < VK_MAX_DESCRIPTION_SIZE ); -# if defined( WIN32 ) - strncpy_s( description, VK_MAX_DESCRIPTION_SIZE, description_.data(), description_.size() ); -# else - strncpy( description, description_.data(), std::min( VK_MAX_DESCRIPTION_SIZE, description_.size() ) ); -# endif - } -# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - RenderPassSubpassFeedbackInfoEXT & operator=( RenderPassSubpassFeedbackInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ @@ -104483,8 +107263,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR_14 RenderPassSubpassFeedbackCreateInfoEXT( VULKAN_HPP_NAMESPACE::RenderPassSubpassFeedbackInfoEXT * pSubpassFeedback_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pSubpassFeedback( pSubpassFeedback_ ) + : pNext{ pNext_ } + , pSubpassFeedback{ pSubpassFeedback_ } { } @@ -104582,8 +107362,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RenderPassTransformBeginInfoQCOM( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , transform( transform_ ) + : pNext{ pNext_ } + , transform{ transform_ } { } @@ -104683,12 +107463,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , viewMask( viewMask_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachmentFormats( pColorAttachmentFormats_ ) - , depthAttachmentFormat( depthAttachmentFormat_ ) - , stencilAttachmentFormat( stencilAttachmentFormat_ ) + : pNext{ pNext_ } + , viewMask{ viewMask_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentFormats{ pColorAttachmentFormats_ } + , depthAttachmentFormat{ depthAttachmentFormat_ } + , stencilAttachmentFormat{ stencilAttachmentFormat_ } { } @@ -104854,15 +107634,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore, VULKAN_HPP_NAMESPACE::ClearValue clearValue_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageView( imageView_ ) - , imageLayout( imageLayout_ ) - , resolveMode( resolveMode_ ) - , resolveImageView( resolveImageView_ ) - , resolveImageLayout( resolveImageLayout_ ) - , loadOp( loadOp_ ) - , storeOp( storeOp_ ) - , clearValue( clearValue_ ) + : pNext{ pNext_ } + , imageView{ imageView_ } + , imageLayout{ imageLayout_ } + , resolveMode{ resolveMode_ } + , resolveImageView{ resolveImageView_ } + , resolveImageLayout{ resolveImageLayout_ } + , loadOp{ loadOp_ } + , storeOp{ storeOp_ } + , clearValue{ clearValue_ } { } @@ -105001,9 +107781,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR RenderingAttachmentLocationInfoKHR( uint32_t colorAttachmentCount_ = {}, const uint32_t * pColorAttachmentLocations_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , 
pColorAttachmentLocations( pColorAttachmentLocations_ ) + : pNext{ pNext_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentLocations{ pColorAttachmentLocations_ } { } @@ -105129,9 +107909,9 @@ namespace VULKAN_HPP_NAMESPACE RenderingFragmentDensityMapAttachmentInfoEXT( VULKAN_HPP_NAMESPACE::ImageView imageView_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageView( imageView_ ) - , imageLayout( imageLayout_ ) + : pNext{ pNext_ } + , imageView{ imageView_ } + , imageLayout{ imageLayout_ } { } @@ -105240,10 +108020,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageLayout imageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , imageView( imageView_ ) - , imageLayout( imageLayout_ ) - , shadingRateAttachmentTexelSize( shadingRateAttachmentTexelSize_ ) + : pNext{ pNext_ } + , imageView{ imageView_ } + , imageLayout{ imageLayout_ } + , shadingRateAttachmentTexelSize{ shadingRateAttachmentTexelSize_ } { } @@ -105367,15 +108147,15 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::RenderingAttachmentInfo * pDepthAttachment_ = {}, const VULKAN_HPP_NAMESPACE::RenderingAttachmentInfo * pStencilAttachment_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , renderArea( renderArea_ ) - , layerCount( layerCount_ ) - , viewMask( viewMask_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachments( pColorAttachments_ ) - , pDepthAttachment( pDepthAttachment_ ) - , pStencilAttachment( pStencilAttachment_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , renderArea{ renderArea_ } + , layerCount{ layerCount_ } + , viewMask{ viewMask_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachments{ pColorAttachments_ } + , pDepthAttachment{ pDepthAttachment_ } + , pStencilAttachment{ pStencilAttachment_ } { } @@ -105566,11 +108346,11 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pDepthInputAttachmentIndex_ = {}, const uint32_t * pStencilInputAttachmentIndex_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , colorAttachmentCount( colorAttachmentCount_ ) - , pColorAttachmentInputIndices( pColorAttachmentInputIndices_ ) - , pDepthInputAttachmentIndex( pDepthInputAttachmentIndex_ ) - , pStencilInputAttachmentIndex( pStencilInputAttachmentIndex_ ) + : pNext{ pNext_ } + , colorAttachmentCount{ colorAttachmentCount_ } + , pColorAttachmentInputIndices{ pColorAttachmentInputIndices_ } + , pDepthInputAttachmentIndex{ pDepthInputAttachmentIndex_ } + , pStencilInputAttachmentIndex{ pStencilInputAttachmentIndex_ } { } @@ -105726,13 +108506,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageResolve2 * pRegions_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , srcImage( srcImage_ ) - , srcImageLayout( srcImageLayout_ ) - , dstImage( dstImage_ ) - , dstImageLayout( dstImageLayout_ ) - , regionCount( regionCount_ ) - , pRegions( pRegions_ ) + : pNext{ pNext_ } + , srcImage{ srcImage_ } + , srcImageLayout{ srcImageLayout_ } + , dstImage{ dstImage_ } + , dstImageLayout{ dstImageLayout_ } + , regionCount{ regionCount_ } + , pRegions{ pRegions_ } { } @@ -105900,9 +108680,9 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::Extent2D windowExtent_ = {}, VULKAN_HPP_NAMESPACE::BlockMatchWindowCompareModeQCOM windowCompareMode_ = VULKAN_HPP_NAMESPACE::BlockMatchWindowCompareModeQCOM::eMin, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , windowExtent( windowExtent_ ) - , windowCompareMode( windowCompareMode_ ) + : pNext{ pNext_ } + , windowExtent{ windowExtent_ } + , windowCompareMode{ windowCompareMode_ } { } @@ -106010,9 +108790,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SamplerBorderColorComponentMappingCreateInfoEXT( VULKAN_HPP_NAMESPACE::ComponentMapping components_ = {}, VULKAN_HPP_NAMESPACE::Bool32 srgb_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , components( components_ ) - , srgb( srgb_ ) + : pNext{ pNext_ } + , components{ components_ } + , srgb{ srgb_ } { } @@ -106119,8 +108899,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SamplerCaptureDescriptorDataInfoEXT( VULKAN_HPP_NAMESPACE::Sampler sampler_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , sampler( sampler_ ) + : pNext{ pNext_ } + , sampler{ sampler_ } { } @@ -106231,23 +109011,23 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::BorderColor borderColor_ = VULKAN_HPP_NAMESPACE::BorderColor::eFloatTransparentBlack, VULKAN_HPP_NAMESPACE::Bool32 unnormalizedCoordinates_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , magFilter( magFilter_ ) - , minFilter( minFilter_ ) - , mipmapMode( mipmapMode_ ) - , addressModeU( addressModeU_ ) - , addressModeV( addressModeV_ ) - , addressModeW( addressModeW_ ) - , mipLodBias( mipLodBias_ ) - , anisotropyEnable( anisotropyEnable_ ) - , maxAnisotropy( maxAnisotropy_ ) - , compareEnable( compareEnable_ ) - , compareOp( compareOp_ ) - , minLod( minLod_ ) - , maxLod( maxLod_ ) - , borderColor( borderColor_ ) - , unnormalizedCoordinates( unnormalizedCoordinates_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , magFilter{ magFilter_ } + , minFilter{ minFilter_ } + , mipmapMode{ mipmapMode_ } + , addressModeU{ addressModeU_ } + , addressModeV{ addressModeV_ } + , addressModeW{ addressModeW_ } + , mipLodBias{ mipLodBias_ } + , anisotropyEnable{ anisotropyEnable_ } + , maxAnisotropy{ maxAnisotropy_ } + , compareEnable{ compareEnable_ } + , compareOp{ compareOp_ } + , minLod{ minLod_ } + , maxLod{ maxLod_ } + , borderColor{ borderColor_ } + , unnormalizedCoordinates{ unnormalizedCoordinates_ } { } @@ -106484,8 +109264,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SamplerCubicWeightsCreateInfoQCOM( VULKAN_HPP_NAMESPACE::CubicFilterWeightsQCOM cubicWeights_ = VULKAN_HPP_NAMESPACE::CubicFilterWeightsQCOM::eCatmullRom, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , cubicWeights( cubicWeights_ ) + : pNext{ pNext_ } + , cubicWeights{ cubicWeights_ } { } @@ -106583,9 +109363,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 SamplerCustomBorderColorCreateInfoEXT( VULKAN_HPP_NAMESPACE::ClearColorValue customBorderColor_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , customBorderColor( customBorderColor_ ) - , format( format_ ) + : pNext{ pNext_ } + , customBorderColor{ customBorderColor_ } + , format{ format_ } { } @@ -106675,8 +109455,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR 
SamplerReductionModeCreateInfo( VULKAN_HPP_NAMESPACE::SamplerReductionMode reductionMode_ = VULKAN_HPP_NAMESPACE::SamplerReductionMode::eWeightedAverage, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , reductionMode( reductionMode_ ) + : pNext{ pNext_ } + , reductionMode{ reductionMode_ } { } @@ -106782,15 +109562,15 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Filter chromaFilter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest, VULKAN_HPP_NAMESPACE::Bool32 forceExplicitReconstruction_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , ycbcrModel( ycbcrModel_ ) - , ycbcrRange( ycbcrRange_ ) - , components( components_ ) - , xChromaOffset( xChromaOffset_ ) - , yChromaOffset( yChromaOffset_ ) - , chromaFilter( chromaFilter_ ) - , forceExplicitReconstruction( forceExplicitReconstruction_ ) + : pNext{ pNext_ } + , format{ format_ } + , ycbcrModel{ ycbcrModel_ } + , ycbcrRange{ ycbcrRange_ } + , components{ components_ } + , xChromaOffset{ xChromaOffset_ } + , yChromaOffset{ yChromaOffset_ } + , chromaFilter{ chromaFilter_ } + , forceExplicitReconstruction{ forceExplicitReconstruction_ } { } @@ -106951,8 +109731,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionImageFormatProperties( uint32_t combinedImageSamplerDescriptorCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , combinedImageSamplerDescriptorCount( combinedImageSamplerDescriptorCount_ ) + : pNext{ pNext_ } + , combinedImageSamplerDescriptorCount{ combinedImageSamplerDescriptorCount_ } { } @@ -107036,8 +109816,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionInfo( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion conversion_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , conversion( conversion_ ) + : pNext{ pNext_ } + , conversion{ conversion_ } { } @@ -107136,9 +109916,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM( VULKAN_HPP_NAMESPACE::Bool32 enableYDegamma_ = {}, VULKAN_HPP_NAMESPACE::Bool32 enableCbCrDegamma_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , enableYDegamma( enableYDegamma_ ) - , enableCbCrDegamma( enableCbCrDegamma_ ) + : pNext{ pNext_ } + , enableYDegamma{ enableYDegamma_ } + , enableCbCrDegamma{ enableCbCrDegamma_ } { } @@ -107254,16 +110034,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , externalFormat( externalFormat_ ) - , screenUsage( screenUsage_ ) - , formatFeatures( formatFeatures_ ) - , samplerYcbcrConversionComponents( samplerYcbcrConversionComponents_ ) - , suggestedYcbcrModel( suggestedYcbcrModel_ ) - , suggestedYcbcrRange( suggestedYcbcrRange_ ) - , suggestedXChromaOffset( suggestedXChromaOffset_ ) - , suggestedYChromaOffset( suggestedYChromaOffset_ ) + : pNext{ pNext_ } + , format{ format_ } + , externalFormat{ externalFormat_ } + , screenUsage{ screenUsage_ } + , formatFeatures{ formatFeatures_ } + , samplerYcbcrConversionComponents{ samplerYcbcrConversionComponents_ } + , suggestedYcbcrModel{ 
suggestedYcbcrModel_ } + , suggestedYcbcrRange{ suggestedYcbcrRange_ } + , suggestedXChromaOffset{ suggestedXChromaOffset_ } + , suggestedYChromaOffset{ suggestedYChromaOffset_ } { } @@ -107380,9 +110160,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ScreenBufferPropertiesQNX( VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_ = {}, uint32_t memoryTypeBits_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , allocationSize( allocationSize_ ) - , memoryTypeBits( memoryTypeBits_ ) + : pNext{ pNext_ } + , allocationSize{ allocationSize_ } + , memoryTypeBits{ memoryTypeBits_ } { } @@ -107469,10 +110249,10 @@ namespace VULKAN_HPP_NAMESPACE struct _screen_context * context_ = {}, struct _screen_window * window_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , context( context_ ) - , window( window_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , context{ context_ } + , window{ window_ } { } @@ -107586,8 +110366,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SemaphoreCreateInfo( VULKAN_HPP_NAMESPACE::SemaphoreCreateFlags flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -107684,9 +110464,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , handleType{ handleType_ } { } @@ -107795,9 +110575,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , handleType{ handleType_ } { } @@ -107908,9 +110688,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , handleType( handleType_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , handleType{ handleType_ } { } @@ -108018,9 +110798,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SemaphoreSignalInfo( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, uint64_t value_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphore( semaphore_ ) - , value( value_ ) + : pNext{ pNext_ } + , semaphore{ semaphore_ } + , value{ value_ } { } @@ -108125,9 +110905,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SemaphoreTypeCreateInfo( VULKAN_HPP_NAMESPACE::SemaphoreType semaphoreType_ = VULKAN_HPP_NAMESPACE::SemaphoreType::eBinary, uint64_t initialValue_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , semaphoreType( semaphoreType_ ) - , initialValue( initialValue_ ) + : pNext{ pNext_ } + , semaphoreType{ semaphoreType_ } + , 
initialValue{ initialValue_ } { } @@ -108235,11 +111015,11 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::Semaphore * pSemaphores_ = {}, const uint64_t * pValues_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , semaphoreCount( semaphoreCount_ ) - , pSemaphores( pSemaphores_ ) - , pValues( pValues_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , semaphoreCount{ semaphoreCount_ } + , pSemaphores{ pSemaphores_ } + , pValues{ pValues_ } { } @@ -108407,13 +111187,13 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pBufferIndices_ = {}, const VULKAN_HPP_NAMESPACE::DeviceSize * pOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stageFlags( stageFlags_ ) - , layout( layout_ ) - , firstSet( firstSet_ ) - , setCount( setCount_ ) - , pBufferIndices( pBufferIndices_ ) - , pOffsets( pOffsets_ ) + : pNext{ pNext_ } + , stageFlags{ stageFlags_ } + , layout{ layout_ } + , firstSet{ firstSet_ } + , setCount{ setCount_ } + , pBufferIndices{ pBufferIndices_ } + , pOffsets{ pOffsets_ } { } @@ -108600,9 +111380,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SetLatencyMarkerInfoNV( uint64_t presentID_ = {}, VULKAN_HPP_NAMESPACE::LatencyMarkerNV marker_ = VULKAN_HPP_NAMESPACE::LatencyMarkerNV::eSimulationStart, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentID( presentID_ ) - , marker( marker_ ) + : pNext{ pNext_ } + , presentID{ presentID_ } + , marker{ marker_ } { } @@ -108700,7 +111480,7 @@ namespace VULKAN_HPP_NAMESPACE using NativeType = VkSetStateFlagsIndirectCommandNV; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR SetStateFlagsIndirectCommandNV( uint32_t data_ = {} ) VULKAN_HPP_NOEXCEPT : data( data_ ) {} + VULKAN_HPP_CONSTEXPR SetStateFlagsIndirectCommandNV( uint32_t data_ = {} ) VULKAN_HPP_NOEXCEPT : data{ data_ } {} VULKAN_HPP_CONSTEXPR SetStateFlagsIndirectCommandNV( SetStateFlagsIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -108791,19 +111571,19 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::PushConstantRange * pPushConstantRanges_ = {}, const VULKAN_HPP_NAMESPACE::SpecializationInfo * pSpecializationInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , stage( stage_ ) - , nextStage( nextStage_ ) - , codeType( codeType_ ) - , codeSize( codeSize_ ) - , pCode( pCode_ ) - , pName( pName_ ) - , setLayoutCount( setLayoutCount_ ) - , pSetLayouts( pSetLayouts_ ) - , pushConstantRangeCount( pushConstantRangeCount_ ) - , pPushConstantRanges( pPushConstantRanges_ ) - , pSpecializationInfo( pSpecializationInfo_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , stage{ stage_ } + , nextStage{ nextStage_ } + , codeType{ codeType_ } + , codeSize{ codeSize_ } + , pCode{ pCode_ } + , pName{ pName_ } + , setLayoutCount{ setLayoutCount_ } + , pSetLayouts{ pSetLayouts_ } + , pushConstantRangeCount{ pushConstantRangeCount_ } + , pPushConstantRanges{ pPushConstantRanges_ } + , pSpecializationInfo{ pSpecializationInfo_ } { } @@ -109097,10 +111877,10 @@ namespace VULKAN_HPP_NAMESPACE size_t codeSize_ = {}, const uint32_t * pCode_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , codeSize( codeSize_ ) - , pCode( pCode_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , codeSize{ codeSize_ } + , pCode{ pCode_ } { } @@ -109233,9 +112013,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_14 
ShaderModuleIdentifierEXT( uint32_t identifierSize_ = {}, std::array<uint8_t, VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT> const & identifier_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , identifierSize( identifierSize_ )
-      , identifier( identifier_ )
+      : pNext{ pNext_ }
+      , identifierSize{ identifierSize_ }
+      , identifier{ identifier_ }
     {
     }
 
@@ -109246,15 +112026,6 @@ namespace VULKAN_HPP_NAMESPACE
     {
     }
 
-# if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE )
-    ShaderModuleIdentifierEXT( VULKAN_HPP_NAMESPACE::ArrayProxy<const uint8_t> const & identifier_, void * pNext_ = nullptr )
-      : pNext( pNext_ ), identifierSize( std::min( static_cast<uint32_t>( identifier_.size() ), VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT ) )
-    {
-      VULKAN_HPP_ASSERT( identifier_.size() < VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT );
-      memcpy( identifier, identifier_.data(), identifierSize * sizeof( uint8_t ) );
-    }
-# endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
-
     ShaderModuleIdentifierEXT & operator=( ShaderModuleIdentifierEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
 #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/
 
@@ -109342,8 +112113,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR ShaderModuleValidationCacheCreateInfoEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , validationCache( validationCache_ )
+      : pNext{ pNext_ }
+      , validationCache{ validationCache_ }
     {
     }
 
@@ -109440,11 +112211,11 @@ namespace VULKAN_HPP_NAMESPACE
                                          uint32_t ldsSizePerLocalWorkGroup_ = {},
                                          size_t ldsUsageSizeInBytes_ = {},
                                          size_t scratchMemUsageInBytes_ = {} ) VULKAN_HPP_NOEXCEPT
-      : numUsedVgprs( numUsedVgprs_ )
-      , numUsedSgprs( numUsedSgprs_ )
-      , ldsSizePerLocalWorkGroup( ldsSizePerLocalWorkGroup_ )
-      , ldsUsageSizeInBytes( ldsUsageSizeInBytes_ )
-      , scratchMemUsageInBytes( scratchMemUsageInBytes_ )
+      : numUsedVgprs{ numUsedVgprs_ }
+      , numUsedSgprs{ numUsedSgprs_ }
+      , ldsSizePerLocalWorkGroup{ ldsSizePerLocalWorkGroup_ }
+      , ldsUsageSizeInBytes{ ldsUsageSizeInBytes_ }
+      , scratchMemUsageInBytes{ scratchMemUsageInBytes_ }
     {
     }
 
@@ -109525,13 +112296,13 @@ namespace VULKAN_HPP_NAMESPACE
                                          uint32_t numAvailableVgprs_ = {},
                                          uint32_t numAvailableSgprs_ = {},
                                          std::array<uint32_t, 3> const & computeWorkGroupSize_ = {} ) VULKAN_HPP_NOEXCEPT
-      : shaderStageMask( shaderStageMask_ )
-      , resourceUsage( resourceUsage_ )
-      , numPhysicalVgprs( numPhysicalVgprs_ )
-      , numPhysicalSgprs( numPhysicalSgprs_ )
-      , numAvailableVgprs( numAvailableVgprs_ )
-      , numAvailableSgprs( numAvailableSgprs_ )
-      , computeWorkGroupSize( computeWorkGroupSize_ )
+      : shaderStageMask{ shaderStageMask_ }
+      , resourceUsage{ resourceUsage_ }
+      , numPhysicalVgprs{ numPhysicalVgprs_ }
+      , numPhysicalSgprs{ numPhysicalSgprs_ }
+      , numAvailableVgprs{ numAvailableVgprs_ }
+      , numAvailableSgprs{ numAvailableSgprs_ }
+      , computeWorkGroupSize{ computeWorkGroupSize_ }
     {
     }
 
@@ -109619,8 +112390,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SharedPresentSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::ImageUsageFlags sharedPresentSupportedUsageFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , sharedPresentSupportedUsageFlags( sharedPresentSupportedUsageFlags_ )
+      : pNext{ pNext_ }
+      , sharedPresentSupportedUsageFlags{ sharedPresentSupportedUsageFlags_ }
     {
     }
 
@@ -109700,9 +112471,9 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR SparseImageFormatProperties( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {},
VULKAN_HPP_NAMESPACE::Extent3D imageGranularity_ = {}, VULKAN_HPP_NAMESPACE::SparseImageFormatFlags flags_ = {} ) VULKAN_HPP_NOEXCEPT - : aspectMask( aspectMask_ ) - , imageGranularity( imageGranularity_ ) - , flags( flags_ ) + : aspectMask{ aspectMask_ } + , imageGranularity{ imageGranularity_ } + , flags{ flags_ } { } @@ -109778,8 +112549,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SparseImageFormatProperties2( VULKAN_HPP_NAMESPACE::SparseImageFormatProperties properties_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , properties( properties_ ) + : pNext{ pNext_ } + , properties{ properties_ } { } @@ -109863,11 +112634,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailStride_ = {} ) VULKAN_HPP_NOEXCEPT - : formatProperties( formatProperties_ ) - , imageMipTailFirstLod( imageMipTailFirstLod_ ) - , imageMipTailSize( imageMipTailSize_ ) - , imageMipTailOffset( imageMipTailOffset_ ) - , imageMipTailStride( imageMipTailStride_ ) + : formatProperties{ formatProperties_ } + , imageMipTailFirstLod{ imageMipTailFirstLod_ } + , imageMipTailSize{ imageMipTailSize_ } + , imageMipTailOffset{ imageMipTailOffset_ } + , imageMipTailStride{ imageMipTailStride_ } { } @@ -109951,8 +112722,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SparseImageMemoryRequirements2( VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements memoryRequirements_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryRequirements( memoryRequirements_ ) + : pNext{ pNext_ } + , memoryRequirements{ memoryRequirements_ } { } @@ -110038,9 +112809,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR StreamDescriptorSurfaceCreateInfoGGP( VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateFlagsGGP flags_ = {}, GgpStreamDescriptor streamDescriptor_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , streamDescriptor( streamDescriptor_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , streamDescriptor{ streamDescriptor_ } { } @@ -110155,9 +112926,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR StridedDeviceAddressRegionKHR( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} ) VULKAN_HPP_NOEXCEPT - : deviceAddress( deviceAddress_ ) - , stride( stride_ ) - , size( size_ ) + : deviceAddress{ deviceAddress_ } + , stride{ stride_ } + , size{ size_ } { } @@ -110259,14 +113030,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore * pSignalSemaphores_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreCount( waitSemaphoreCount_ ) - , pWaitSemaphores( pWaitSemaphores_ ) - , pWaitDstStageMask( pWaitDstStageMask_ ) - , commandBufferCount( commandBufferCount_ ) - , pCommandBuffers( pCommandBuffers_ ) - , signalSemaphoreCount( signalSemaphoreCount_ ) - , pSignalSemaphores( pSignalSemaphores_ ) + : pNext{ pNext_ } + , waitSemaphoreCount{ waitSemaphoreCount_ } + , pWaitSemaphores{ pWaitSemaphores_ } + , pWaitDstStageMask{ pWaitDstStageMask_ } + , commandBufferCount{ commandBufferCount_ } + , pCommandBuffers{ pCommandBuffers_ } + , signalSemaphoreCount{ signalSemaphoreCount_ } + , 
pSignalSemaphores{ pSignalSemaphores_ } { } @@ -110485,14 +113256,14 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::SemaphoreSubmitInfo * pSignalSemaphoreInfos_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , waitSemaphoreInfoCount( waitSemaphoreInfoCount_ ) - , pWaitSemaphoreInfos( pWaitSemaphoreInfos_ ) - , commandBufferInfoCount( commandBufferInfoCount_ ) - , pCommandBufferInfos( pCommandBufferInfos_ ) - , signalSemaphoreInfoCount( signalSemaphoreInfoCount_ ) - , pSignalSemaphoreInfos( pSignalSemaphoreInfos_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , waitSemaphoreInfoCount{ waitSemaphoreInfoCount_ } + , pWaitSemaphoreInfos{ pWaitSemaphoreInfos_ } + , commandBufferInfoCount{ commandBufferInfoCount_ } + , pCommandBufferInfos{ pCommandBufferInfos_ } + , signalSemaphoreInfoCount{ signalSemaphoreInfoCount_ } + , pSignalSemaphoreInfos{ pSignalSemaphoreInfos_ } { } @@ -110698,8 +113469,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SubpassBeginInfo( VULKAN_HPP_NAMESPACE::SubpassContents contents_ = VULKAN_HPP_NAMESPACE::SubpassContents::eInline, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , contents( contents_ ) + : pNext{ pNext_ } + , contents{ contents_ } { } @@ -110797,10 +113568,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ResolveModeFlagBits stencilResolveMode_ = VULKAN_HPP_NAMESPACE::ResolveModeFlagBits::eNone, const VULKAN_HPP_NAMESPACE::AttachmentReference2 * pDepthStencilResolveAttachment_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , depthResolveMode( depthResolveMode_ ) - , stencilResolveMode( stencilResolveMode_ ) - , pDepthStencilResolveAttachment( pDepthStencilResolveAttachment_ ) + : pNext{ pNext_ } + , depthResolveMode{ depthResolveMode_ } + , stencilResolveMode{ stencilResolveMode_ } + , pDepthStencilResolveAttachment{ pDepthStencilResolveAttachment_ } { } @@ -110918,7 +113689,7 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassEndInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR SubpassEndInfo( const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext( pNext_ ) {} + VULKAN_HPP_CONSTEXPR SubpassEndInfo( const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } {} VULKAN_HPP_CONSTEXPR SubpassEndInfo( SubpassEndInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; @@ -111005,9 +113776,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SubpassFragmentDensityMapOffsetEndInfoQCOM( uint32_t fragmentDensityOffsetCount_ = {}, const VULKAN_HPP_NAMESPACE::Offset2D * pFragmentDensityOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fragmentDensityOffsetCount( fragmentDensityOffsetCount_ ) - , pFragmentDensityOffsets( pFragmentDensityOffsets_ ) + : pNext{ pNext_ } + , fragmentDensityOffsetCount{ fragmentDensityOffsetCount_ } + , pFragmentDensityOffsets{ pFragmentDensityOffsets_ } { } @@ -111132,8 +113903,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SubpassResolvePerformanceQueryEXT( VULKAN_HPP_NAMESPACE::Bool32 optimal_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , optimal( optimal_ ) + : pNext{ pNext_ } + , optimal{ optimal_ } { } @@ -111216,9 +113987,9 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_CONSTEXPR SubpassShadingPipelineCreateInfoHUAWEI( VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, uint32_t subpass_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , renderPass( renderPass_ ) - , subpass( subpass_ ) + : pNext{ pNext_ } + , renderPass{ renderPass_ } + , subpass{ subpass_ } { } @@ -111320,8 +114091,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SubresourceHostMemcpySizeEXT( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , size( size_ ) + : pNext{ pNext_ } + , size{ size_ } { } @@ -111402,8 +114173,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::SubresourceLayout subresourceLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , subresourceLayout( subresourceLayout_ ) + : pNext{ pNext_ } + , subresourceLayout{ subresourceLayout_ } { } @@ -111498,18 +114269,18 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags_ = {}, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT supportedSurfaceCounters_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , minImageCount( minImageCount_ ) - , maxImageCount( maxImageCount_ ) - , currentExtent( currentExtent_ ) - , minImageExtent( minImageExtent_ ) - , maxImageExtent( maxImageExtent_ ) - , maxImageArrayLayers( maxImageArrayLayers_ ) - , supportedTransforms( supportedTransforms_ ) - , currentTransform( currentTransform_ ) - , supportedCompositeAlpha( supportedCompositeAlpha_ ) - , supportedUsageFlags( supportedUsageFlags_ ) - , supportedSurfaceCounters( supportedSurfaceCounters_ ) + : pNext{ pNext_ } + , minImageCount{ minImageCount_ } + , maxImageCount{ maxImageCount_ } + , currentExtent{ currentExtent_ } + , minImageExtent{ minImageExtent_ } + , maxImageExtent{ maxImageExtent_ } + , maxImageArrayLayers{ maxImageArrayLayers_ } + , supportedTransforms{ supportedTransforms_ } + , currentTransform{ currentTransform_ } + , supportedCompositeAlpha{ supportedCompositeAlpha_ } + , supportedUsageFlags{ supportedUsageFlags_ } + , supportedSurfaceCounters{ supportedSurfaceCounters_ } { } @@ -111635,16 +114406,16 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR currentTransform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::CompositeAlphaFlagsKHR supportedCompositeAlpha_ = {}, VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags_ = {} ) VULKAN_HPP_NOEXCEPT - : minImageCount( minImageCount_ ) - , maxImageCount( maxImageCount_ ) - , currentExtent( currentExtent_ ) - , minImageExtent( minImageExtent_ ) - , maxImageExtent( maxImageExtent_ ) - , maxImageArrayLayers( maxImageArrayLayers_ ) - , supportedTransforms( supportedTransforms_ ) - , currentTransform( currentTransform_ ) - , supportedCompositeAlpha( supportedCompositeAlpha_ ) - , supportedUsageFlags( supportedUsageFlags_ ) + : minImageCount{ minImageCount_ } + , maxImageCount{ maxImageCount_ } + , currentExtent{ currentExtent_ } + , minImageExtent{ minImageExtent_ } + , maxImageExtent{ maxImageExtent_ } + , maxImageArrayLayers{ maxImageArrayLayers_ } + , supportedTransforms{ supportedTransforms_ } + , currentTransform{ currentTransform_ } + , supportedCompositeAlpha{ supportedCompositeAlpha_ } + , supportedUsageFlags{ supportedUsageFlags_ } { } @@ -111748,8 +114519,8 @@ 
namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SurfaceCapabilities2KHR( VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , surfaceCapabilities( surfaceCapabilities_ )
+      : pNext{ pNext_ }
+      , surfaceCapabilities{ surfaceCapabilities_ }
     {
     }
 
@@ -111832,8 +114603,8 @@ namespace VULKAN_HPP_NAMESPACE
 # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesFullScreenExclusiveEXT( VULKAN_HPP_NAMESPACE::Bool32 fullScreenExclusiveSupported_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , fullScreenExclusiveSupported( fullScreenExclusiveSupported_ )
+      : pNext{ pNext_ }
+      , fullScreenExclusiveSupported{ fullScreenExclusiveSupported_ }
     {
     }
 
@@ -111853,21 +114624,6 @@ namespace VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-# if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
-    VULKAN_HPP_CONSTEXPR_14 SurfaceCapabilitiesFullScreenExclusiveEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
-    {
-      pNext = pNext_;
-      return *this;
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 SurfaceCapabilitiesFullScreenExclusiveEXT &
-      setFullScreenExclusiveSupported( VULKAN_HPP_NAMESPACE::Bool32 fullScreenExclusiveSupported_ ) VULKAN_HPP_NOEXCEPT
-    {
-      fullScreenExclusiveSupported = fullScreenExclusiveSupported_;
-      return *this;
-    }
-# endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
-
     operator VkSurfaceCapabilitiesFullScreenExclusiveEXT const &() const VULKAN_HPP_NOEXCEPT
     {
       return *reinterpret_cast<const VkSurfaceCapabilitiesFullScreenExclusiveEXT *>( this );
@@ -111931,8 +114687,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesPresentBarrierNV( VULKAN_HPP_NAMESPACE::Bool32 presentBarrierSupported_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , presentBarrierSupported( presentBarrierSupported_ )
+      : pNext{ pNext_ }
+      , presentBarrierSupported{ presentBarrierSupported_ }
     {
     }
 
@@ -111952,21 +114708,6 @@ namespace VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
-    VULKAN_HPP_CONSTEXPR_14 SurfaceCapabilitiesPresentBarrierNV & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT
-    {
-      pNext = pNext_;
-      return *this;
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 SurfaceCapabilitiesPresentBarrierNV &
-      setPresentBarrierSupported( VULKAN_HPP_NAMESPACE::Bool32 presentBarrierSupported_ ) VULKAN_HPP_NOEXCEPT
-    {
-      presentBarrierSupported = presentBarrierSupported_;
-      return *this;
-    }
-#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
-
     operator VkSurfaceCapabilitiesPresentBarrierNV const &() const VULKAN_HPP_NOEXCEPT
     {
      return *reinterpret_cast<const VkSurfaceCapabilitiesPresentBarrierNV *>( this );
@@ -112027,8 +114768,8 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR SurfaceFormatKHR( VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined,
                                            VULKAN_HPP_NAMESPACE::ColorSpaceKHR colorSpace_ = VULKAN_HPP_NAMESPACE::ColorSpaceKHR::eSrgbNonlinear ) VULKAN_HPP_NOEXCEPT
-      : format( format_ )
-      , colorSpace( colorSpace_ )
+      : format{ format_ }
+      , colorSpace{ colorSpace_ }
     {
     }
 
@@ -112099,8 +114840,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SurfaceFormat2KHR( VULKAN_HPP_NAMESPACE::SurfaceFormatKHR surfaceFormat_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , surfaceFormat( surfaceFormat_ )
+      : pNext{ pNext_ }
+      , surfaceFormat{ surfaceFormat_ }
     {
     }
 
@@ -112181,8 +114922,8 @@ namespace VULKAN_HPP_NAMESPACE
     VULKAN_HPP_CONSTEXPR
SurfaceFullScreenExclusiveInfoEXT( VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT fullScreenExclusive_ = VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT::eDefault, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , fullScreenExclusive( fullScreenExclusive_ ) + : pNext{ pNext_ } + , fullScreenExclusive{ fullScreenExclusive_ } { } @@ -112280,8 +115021,8 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SurfaceFullScreenExclusiveWin32InfoEXT( HMONITOR hmonitor_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hmonitor( hmonitor_ ) + : pNext{ pNext_ } + , hmonitor{ hmonitor_ } { } @@ -112379,9 +115120,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SurfacePresentModeCompatibilityEXT( uint32_t presentModeCount_ = {}, VULKAN_HPP_NAMESPACE::PresentModeKHR * pPresentModes_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentModeCount( presentModeCount_ ) - , pPresentModes( pPresentModes_ ) + : pNext{ pNext_ } + , presentModeCount{ presentModeCount_ } + , pPresentModes{ pPresentModes_ } { } @@ -112502,8 +115243,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SurfacePresentModeEXT( VULKAN_HPP_NAMESPACE::PresentModeKHR presentMode_ = VULKAN_HPP_NAMESPACE::PresentModeKHR::eImmediate, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentMode( presentMode_ ) + : pNext{ pNext_ } + , presentMode{ presentMode_ } { } @@ -112603,12 +115344,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D minScaledImageExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxScaledImageExtent_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , supportedPresentScaling( supportedPresentScaling_ ) - , supportedPresentGravityX( supportedPresentGravityX_ ) - , supportedPresentGravityY( supportedPresentGravityY_ ) - , minScaledImageExtent( minScaledImageExtent_ ) - , maxScaledImageExtent( maxScaledImageExtent_ ) + : pNext{ pNext_ } + , supportedPresentScaling{ supportedPresentScaling_ } + , supportedPresentGravityX{ supportedPresentGravityX_ } + , supportedPresentGravityY{ supportedPresentGravityY_ } + , minScaledImageExtent{ minScaledImageExtent_ } + , maxScaledImageExtent{ maxScaledImageExtent_ } { } @@ -112628,49 +115369,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT - { - pNext = pNext_; - return *this; - } - - VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT & - setSupportedPresentScaling( VULKAN_HPP_NAMESPACE::PresentScalingFlagsEXT supportedPresentScaling_ ) VULKAN_HPP_NOEXCEPT - { - supportedPresentScaling = supportedPresentScaling_; - return *this; - } - - VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT & - setSupportedPresentGravityX( VULKAN_HPP_NAMESPACE::PresentGravityFlagsEXT supportedPresentGravityX_ ) VULKAN_HPP_NOEXCEPT - { - supportedPresentGravityX = supportedPresentGravityX_; - return *this; - } - - VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT & - setSupportedPresentGravityY( VULKAN_HPP_NAMESPACE::PresentGravityFlagsEXT supportedPresentGravityY_ ) VULKAN_HPP_NOEXCEPT - { - supportedPresentGravityY = supportedPresentGravityY_; - return *this; - } - - VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT & - setMinScaledImageExtent( 
VULKAN_HPP_NAMESPACE::Extent2D const & minScaledImageExtent_ ) VULKAN_HPP_NOEXCEPT
-    {
-      minScaledImageExtent = minScaledImageExtent_;
-      return *this;
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 SurfacePresentScalingCapabilitiesEXT &
-      setMaxScaledImageExtent( VULKAN_HPP_NAMESPACE::Extent2D const & maxScaledImageExtent_ ) VULKAN_HPP_NOEXCEPT
-    {
-      maxScaledImageExtent = maxScaledImageExtent_;
-      return *this;
-    }
-#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
-
     operator VkSurfacePresentScalingCapabilitiesEXT const &() const VULKAN_HPP_NOEXCEPT
     {
       return *reinterpret_cast<const VkSurfacePresentScalingCapabilitiesEXT *>( this );
@@ -112745,8 +115443,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SurfaceProtectedCapabilitiesKHR( VULKAN_HPP_NAMESPACE::Bool32 supportsProtected_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , supportsProtected( supportsProtected_ )
+      : pNext{ pNext_ }
+      , supportsProtected{ supportsProtected_ }
     {
     }
 
@@ -112766,20 +115464,6 @@ namespace VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
-    VULKAN_HPP_CONSTEXPR_14 SurfaceProtectedCapabilitiesKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT
-    {
-      pNext = pNext_;
-      return *this;
-    }
-
-    VULKAN_HPP_CONSTEXPR_14 SurfaceProtectedCapabilitiesKHR & setSupportsProtected( VULKAN_HPP_NAMESPACE::Bool32 supportsProtected_ ) VULKAN_HPP_NOEXCEPT
-    {
-      supportsProtected = supportsProtected_;
-      return *this;
-    }
-#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/
-
     operator VkSurfaceProtectedCapabilitiesKHR const &() const VULKAN_HPP_NOEXCEPT
     {
       return *reinterpret_cast<const VkSurfaceProtectedCapabilitiesKHR *>( this );
@@ -112842,8 +115526,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR SwapchainCounterCreateInfoEXT( VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT surfaceCounters_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , surfaceCounters( surfaceCounters_ )
+      : pNext{ pNext_ }
+      , surfaceCounters{ surfaceCounters_ }
     {
     }
 
@@ -112956,23 +115640,23 @@ namespace VULKAN_HPP_NAMESPACE
                                              VULKAN_HPP_NAMESPACE::Bool32 clipped_ = {},
                                              VULKAN_HPP_NAMESPACE::SwapchainKHR oldSwapchain_ = {},
                                              const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
-      : pNext( pNext_ )
-      , flags( flags_ )
-      , surface( surface_ )
-      , minImageCount( minImageCount_ )
-      , imageFormat( imageFormat_ )
-      , imageColorSpace( imageColorSpace_ )
-      , imageExtent( imageExtent_ )
-      , imageArrayLayers( imageArrayLayers_ )
-      , imageUsage( imageUsage_ )
-      , imageSharingMode( imageSharingMode_ )
-      , queueFamilyIndexCount( queueFamilyIndexCount_ )
-      , pQueueFamilyIndices( pQueueFamilyIndices_ )
-      , preTransform( preTransform_ )
-      , compositeAlpha( compositeAlpha_ )
-      , presentMode( presentMode_ )
-      , clipped( clipped_ )
-      , oldSwapchain( oldSwapchain_ )
+      : pNext{ pNext_ }
+      , flags{ flags_ }
+      , surface{ surface_ }
+      , minImageCount{ minImageCount_ }
+      , imageFormat{ imageFormat_ }
+      , imageColorSpace{ imageColorSpace_ }
+      , imageExtent{ imageExtent_ }
+      , imageArrayLayers{ imageArrayLayers_ }
+      , imageUsage{ imageUsage_ }
+      , imageSharingMode{ imageSharingMode_ }
+      , queueFamilyIndexCount{ queueFamilyIndexCount_ }
+      , pQueueFamilyIndices{ pQueueFamilyIndices_ }
+      , preTransform{ preTransform_ }
+      , compositeAlpha{ compositeAlpha_ }
+      , presentMode{ presentMode_ }
+      , clipped{ clipped_ }
+      , oldSwapchain{ oldSwapchain_ }
     {
     }
 
@@ -113260,8 +115944,8 @@ namespace VULKAN_HPP_NAMESPACE
 #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR
SwapchainDisplayNativeHdrCreateInfoAMD( VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , localDimmingEnable( localDimmingEnable_ ) + : pNext{ pNext_ } + , localDimmingEnable{ localDimmingEnable_ } { } @@ -113357,8 +116041,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SwapchainLatencyCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 latencyModeEnable_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , latencyModeEnable( latencyModeEnable_ ) + : pNext{ pNext_ } + , latencyModeEnable{ latencyModeEnable_ } { } @@ -113454,8 +116138,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR SwapchainPresentBarrierCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 presentBarrierEnable_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentBarrierEnable( presentBarrierEnable_ ) + : pNext{ pNext_ } + , presentBarrierEnable{ presentBarrierEnable_ } { } @@ -113553,9 +116237,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SwapchainPresentFenceInfoEXT( uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::Fence * pFences_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pFences( pFences_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pFences{ pFences_ } { } @@ -113677,9 +116361,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SwapchainPresentModeInfoEXT( uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentModeKHR * pPresentModes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , swapchainCount( swapchainCount_ ) - , pPresentModes( pPresentModes_ ) + : pNext{ pNext_ } + , swapchainCount{ swapchainCount_ } + , pPresentModes{ pPresentModes_ } { } @@ -113801,9 +116485,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR SwapchainPresentModesCreateInfoEXT( uint32_t presentModeCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentModeKHR * pPresentModes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , presentModeCount( presentModeCount_ ) - , pPresentModes( pPresentModes_ ) + : pNext{ pNext_ } + , presentModeCount{ presentModeCount_ } + , pPresentModes{ pPresentModes_ } { } @@ -113927,10 +116611,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::PresentGravityFlagsEXT presentGravityX_ = {}, VULKAN_HPP_NAMESPACE::PresentGravityFlagsEXT presentGravityY_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , scalingBehavior( scalingBehavior_ ) - , presentGravityX( presentGravityX_ ) - , presentGravityY( presentGravityY_ ) + : pNext{ pNext_ } + , scalingBehavior{ scalingBehavior_ } + , presentGravityX{ presentGravityX_ } + , presentGravityY{ presentGravityY_ } { } @@ -114048,8 +116732,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR TextureLODGatherFormatPropertiesAMD( VULKAN_HPP_NAMESPACE::Bool32 supportsTextureGatherLODBiasAMD_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , supportsTextureGatherLODBiasAMD( supportsTextureGatherLODBiasAMD_ ) + : pNext{ pNext_ } + , supportsTextureGatherLODBiasAMD{ supportsTextureGatherLODBiasAMD_ } { } @@ -114133,10 +116817,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D apronSize_ = {}, VULKAN_HPP_NAMESPACE::Offset2D 
origin_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , tileSize( tileSize_ ) - , apronSize( apronSize_ ) - , origin( origin_ ) + : pNext{ pNext_ } + , tileSize{ tileSize_ } + , apronSize{ apronSize_ } + , origin{ origin_ } { } @@ -114250,11 +116934,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t signalSemaphoreValueCount_ = {}, const uint64_t * pSignalSemaphoreValues_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , waitSemaphoreValueCount( waitSemaphoreValueCount_ ) - , pWaitSemaphoreValues( pWaitSemaphoreValues_ ) - , signalSemaphoreValueCount( signalSemaphoreValueCount_ ) - , pSignalSemaphoreValues( pSignalSemaphoreValues_ ) + : pNext{ pNext_ } + , waitSemaphoreValueCount{ waitSemaphoreValueCount_ } + , pWaitSemaphoreValues{ pWaitSemaphoreValues_ } + , signalSemaphoreValueCount{ signalSemaphoreValueCount_ } + , pSignalSemaphoreValues{ pSignalSemaphoreValues_ } { } @@ -114422,20 +117106,20 @@ namespace VULKAN_HPP_NAMESPACE uint32_t width_ = {}, uint32_t height_ = {}, uint32_t depth_ = {} ) VULKAN_HPP_NOEXCEPT - : raygenShaderRecordAddress( raygenShaderRecordAddress_ ) - , raygenShaderRecordSize( raygenShaderRecordSize_ ) - , missShaderBindingTableAddress( missShaderBindingTableAddress_ ) - , missShaderBindingTableSize( missShaderBindingTableSize_ ) - , missShaderBindingTableStride( missShaderBindingTableStride_ ) - , hitShaderBindingTableAddress( hitShaderBindingTableAddress_ ) - , hitShaderBindingTableSize( hitShaderBindingTableSize_ ) - , hitShaderBindingTableStride( hitShaderBindingTableStride_ ) - , callableShaderBindingTableAddress( callableShaderBindingTableAddress_ ) - , callableShaderBindingTableSize( callableShaderBindingTableSize_ ) - , callableShaderBindingTableStride( callableShaderBindingTableStride_ ) - , width( width_ ) - , height( height_ ) - , depth( depth_ ) + : raygenShaderRecordAddress{ raygenShaderRecordAddress_ } + , raygenShaderRecordSize{ raygenShaderRecordSize_ } + , missShaderBindingTableAddress{ missShaderBindingTableAddress_ } + , missShaderBindingTableSize{ missShaderBindingTableSize_ } + , missShaderBindingTableStride{ missShaderBindingTableStride_ } + , hitShaderBindingTableAddress{ hitShaderBindingTableAddress_ } + , hitShaderBindingTableSize{ hitShaderBindingTableSize_ } + , hitShaderBindingTableStride{ hitShaderBindingTableStride_ } + , callableShaderBindingTableAddress{ callableShaderBindingTableAddress_ } + , callableShaderBindingTableSize{ callableShaderBindingTableSize_ } + , callableShaderBindingTableStride{ callableShaderBindingTableStride_ } + , width{ width_ } + , height{ height_ } + , depth{ depth_ } { } @@ -114648,9 +117332,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR TraceRaysIndirectCommandKHR( uint32_t width_ = {}, uint32_t height_ = {}, uint32_t depth_ = {} ) VULKAN_HPP_NOEXCEPT - : width( width_ ) - , height( height_ ) - , depth( depth_ ) + : width{ width_ } + , height{ height_ } + , depth{ depth_ } { } @@ -114753,10 +117437,10 @@ namespace VULKAN_HPP_NAMESPACE size_t initialDataSize_ = {}, const void * pInitialData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , initialDataSize( initialDataSize_ ) - , pInitialData( pInitialData_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , initialDataSize{ initialDataSize_ } + , pInitialData{ pInitialData_ } { } @@ -114894,11 +117578,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t disabledValidationFeatureCount_ = {}, const 
VULKAN_HPP_NAMESPACE::ValidationFeatureDisableEXT * pDisabledValidationFeatures_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , enabledValidationFeatureCount( enabledValidationFeatureCount_ ) - , pEnabledValidationFeatures( pEnabledValidationFeatures_ ) - , disabledValidationFeatureCount( disabledValidationFeatureCount_ ) - , pDisabledValidationFeatures( pDisabledValidationFeatures_ ) + : pNext{ pNext_ } + , enabledValidationFeatureCount{ enabledValidationFeatureCount_ } + , pEnabledValidationFeatures{ pEnabledValidationFeatures_ } + , disabledValidationFeatureCount{ disabledValidationFeatureCount_ } + , pDisabledValidationFeatures{ pDisabledValidationFeatures_ } { } @@ -115061,9 +117745,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR ValidationFlagsEXT( uint32_t disabledValidationCheckCount_ = {}, const VULKAN_HPP_NAMESPACE::ValidationCheckEXT * pDisabledValidationChecks_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , disabledValidationCheckCount( disabledValidationCheckCount_ ) - , pDisabledValidationChecks( pDisabledValidationChecks_ ) + : pNext{ pNext_ } + , disabledValidationCheckCount{ disabledValidationCheckCount_ } + , pDisabledValidationChecks{ pDisabledValidationChecks_ } { } @@ -115188,11 +117872,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, uint32_t offset_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , location( location_ ) - , binding( binding_ ) - , format( format_ ) - , offset( offset_ ) + : pNext{ pNext_ } + , location{ location_ } + , binding{ binding_ } + , format{ format_ } + , offset{ offset_ } { } @@ -115318,11 +118002,11 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VertexInputRate inputRate_ = VULKAN_HPP_NAMESPACE::VertexInputRate::eVertex, uint32_t divisor_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , binding( binding_ ) - , stride( stride_ ) - , inputRate( inputRate_ ) - , divisor( divisor_ ) + : pNext{ pNext_ } + , binding{ binding_ } + , stride{ stride_ } + , inputRate{ inputRate_ } + , divisor{ divisor_ } { } @@ -115446,9 +118130,9 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR ViSurfaceCreateInfoNN( VULKAN_HPP_NAMESPACE::ViSurfaceCreateFlagsNN flags_ = {}, void * window_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , window( window_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , window{ window_ } { } @@ -115555,11 +118239,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t baseArrayLayer_ = {}, VULKAN_HPP_NAMESPACE::ImageView imageViewBinding_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , codedOffset( codedOffset_ ) - , codedExtent( codedExtent_ ) - , baseArrayLayer( baseArrayLayer_ ) - , imageViewBinding( imageViewBinding_ ) + : pNext{ pNext_ } + , codedOffset{ codedOffset_ } + , codedExtent{ codedExtent_ } + , baseArrayLayer{ baseArrayLayer_ } + , imageViewBinding{ imageViewBinding_ } { } @@ -115683,9 +118367,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoReferenceSlotInfoKHR( int32_t slotIndex_ = {}, const VULKAN_HPP_NAMESPACE::VideoPictureResourceInfoKHR * pPictureResource_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , slotIndex( slotIndex_ ) - , pPictureResource( pPictureResource_ ) + : pNext{ pNext_ } + , slotIndex{ slotIndex_ } + , 
pPictureResource{ pPictureResource_ } { } @@ -115796,12 +118480,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t referenceSlotCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoReferenceSlotInfoKHR * pReferenceSlots_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , videoSession( videoSession_ ) - , videoSessionParameters( videoSessionParameters_ ) - , referenceSlotCount( referenceSlotCount_ ) - , pReferenceSlots( pReferenceSlots_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , videoSession{ videoSession_ } + , videoSessionParameters{ videoSessionParameters_ } + , referenceSlotCount{ referenceSlotCount_ } + , pReferenceSlots{ pReferenceSlots_ } { } @@ -115969,16 +118653,16 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxActiveReferencePictures_ = {}, VULKAN_HPP_NAMESPACE::ExtensionProperties stdHeaderVersion_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , minBitstreamBufferOffsetAlignment( minBitstreamBufferOffsetAlignment_ ) - , minBitstreamBufferSizeAlignment( minBitstreamBufferSizeAlignment_ ) - , pictureAccessGranularity( pictureAccessGranularity_ ) - , minCodedExtent( minCodedExtent_ ) - , maxCodedExtent( maxCodedExtent_ ) - , maxDpbSlots( maxDpbSlots_ ) - , maxActiveReferencePictures( maxActiveReferencePictures_ ) - , stdHeaderVersion( stdHeaderVersion_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , minBitstreamBufferOffsetAlignment{ minBitstreamBufferOffsetAlignment_ } + , minBitstreamBufferSizeAlignment{ minBitstreamBufferSizeAlignment_ } + , pictureAccessGranularity{ pictureAccessGranularity_ } + , minCodedExtent{ minCodedExtent_ } + , maxCodedExtent{ maxCodedExtent_ } + , maxDpbSlots{ maxDpbSlots_ } + , maxActiveReferencePictures{ maxActiveReferencePictures_ } + , stdHeaderVersion{ stdHeaderVersion_ } { } @@ -116092,8 +118776,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoCodingControlInfoKHR( VULKAN_HPP_NAMESPACE::VideoCodingControlFlagsKHR flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -116188,8 +118872,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeAV1CapabilitiesKHR( StdVideoAV1Level maxLevel_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxLevel( maxLevel_ ) + : pNext{ pNext_ } + , maxLevel{ maxLevel_ } { } @@ -116277,8 +118961,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeAV1DpbSlotInfoKHR( const StdVideoDecodeAV1ReferenceInfo * pStdReferenceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdReferenceInfo( pStdReferenceInfo_ ) + : pNext{ pNext_ } + , pStdReferenceInfo{ pStdReferenceInfo_ } { } @@ -116379,13 +119063,13 @@ namespace VULKAN_HPP_NAMESPACE const uint32_t * pTileOffsets_ = {}, const uint32_t * pTileSizes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdPictureInfo( pStdPictureInfo_ ) - , referenceNameSlotIndices( referenceNameSlotIndices_ ) - , frameHeaderOffset( frameHeaderOffset_ ) - , tileCount( tileCount_ ) - , pTileOffsets( pTileOffsets_ ) - , pTileSizes( pTileSizes_ ) + : pNext{ pNext_ } + , pStdPictureInfo{ pStdPictureInfo_ } + , referenceNameSlotIndices{ referenceNameSlotIndices_ } + , frameHeaderOffset{ frameHeaderOffset_ } + , tileCount{ tileCount_ } + 
, pTileOffsets{ pTileOffsets_ } + , pTileSizes{ pTileSizes_ } { } @@ -116572,9 +119256,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoDecodeAV1ProfileInfoKHR( StdVideoAV1Profile stdProfile_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filmGrainSupport_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdProfile( stdProfile_ ) - , filmGrainSupport( filmGrainSupport_ ) + : pNext{ pNext_ } + , stdProfile{ stdProfile_ } + , filmGrainSupport{ filmGrainSupport_ } { } @@ -116686,8 +119370,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeAV1SessionParametersCreateInfoKHR( const StdVideoAV1SequenceHeader * pStdSequenceHeader_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdSequenceHeader( pStdSequenceHeader_ ) + : pNext{ pNext_ } + , pStdSequenceHeader{ pStdSequenceHeader_ } { } @@ -116784,8 +119468,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeCapabilitiesKHR( VULKAN_HPP_NAMESPACE::VideoDecodeCapabilityFlagsKHR flags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -116868,9 +119552,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoDecodeH264CapabilitiesKHR( StdVideoH264LevelIdc maxLevelIdc_ = {}, VULKAN_HPP_NAMESPACE::Offset2D fieldOffsetGranularity_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxLevelIdc( maxLevelIdc_ ) - , fieldOffsetGranularity( fieldOffsetGranularity_ ) + : pNext{ pNext_ } + , maxLevelIdc{ maxLevelIdc_ } + , fieldOffsetGranularity{ fieldOffsetGranularity_ } { } @@ -116962,8 +119646,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeH264DpbSlotInfoKHR( const StdVideoDecodeH264ReferenceInfo * pStdReferenceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdReferenceInfo( pStdReferenceInfo_ ) + : pNext{ pNext_ } + , pStdReferenceInfo{ pStdReferenceInfo_ } { } @@ -117062,10 +119746,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t sliceCount_ = {}, const uint32_t * pSliceOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdPictureInfo( pStdPictureInfo_ ) - , sliceCount( sliceCount_ ) - , pSliceOffsets( pSliceOffsets_ ) + : pNext{ pNext_ } + , pStdPictureInfo{ pStdPictureInfo_ } + , sliceCount{ sliceCount_ } + , pSliceOffsets{ pSliceOffsets_ } { } @@ -117200,9 +119884,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VideoDecodeH264PictureLayoutFlagBitsKHR pictureLayout_ = VULKAN_HPP_NAMESPACE::VideoDecodeH264PictureLayoutFlagBitsKHR::eProgressive, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdProfileIdc( stdProfileIdc_ ) - , pictureLayout( pictureLayout_ ) + : pNext{ pNext_ } + , stdProfileIdc{ stdProfileIdc_ } + , pictureLayout{ pictureLayout_ } { } @@ -117321,11 +120005,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdPPSCount_ = {}, const StdVideoH264PictureParameterSet * pStdPPSs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdSPSCount( stdSPSCount_ ) - , pStdSPSs( pStdSPSs_ ) - , stdPPSCount( stdPPSCount_ ) - , pStdPPSs( pStdPPSs_ ) + : pNext{ pNext_ } + , stdSPSCount{ stdSPSCount_ } + , pStdSPSs{ pStdSPSs_ } + , stdPPSCount{ stdPPSCount_ } + , pStdPPSs{ pStdPPSs_ } { } @@ -117484,10 +120168,10 @@ namespace 
VULKAN_HPP_NAMESPACE uint32_t maxStdPPSCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoDecodeH264SessionParametersAddInfoKHR * pParametersAddInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxStdSPSCount( maxStdSPSCount_ ) - , maxStdPPSCount( maxStdPPSCount_ ) - , pParametersAddInfo( pParametersAddInfo_ ) + : pNext{ pNext_ } + , maxStdSPSCount{ maxStdSPSCount_ } + , maxStdPPSCount{ maxStdPPSCount_ } + , pParametersAddInfo{ pParametersAddInfo_ } { } @@ -117603,8 +120287,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeH265CapabilitiesKHR( StdVideoH265LevelIdc maxLevelIdc_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxLevelIdc( maxLevelIdc_ ) + : pNext{ pNext_ } + , maxLevelIdc{ maxLevelIdc_ } { } @@ -117692,8 +120376,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeH265DpbSlotInfoKHR( const StdVideoDecodeH265ReferenceInfo * pStdReferenceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdReferenceInfo( pStdReferenceInfo_ ) + : pNext{ pNext_ } + , pStdReferenceInfo{ pStdReferenceInfo_ } { } @@ -117792,10 +120476,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t sliceSegmentCount_ = {}, const uint32_t * pSliceSegmentOffsets_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdPictureInfo( pStdPictureInfo_ ) - , sliceSegmentCount( sliceSegmentCount_ ) - , pSliceSegmentOffsets( pSliceSegmentOffsets_ ) + : pNext{ pNext_ } + , pStdPictureInfo{ pStdPictureInfo_ } + , sliceSegmentCount{ sliceSegmentCount_ } + , pSliceSegmentOffsets{ pSliceSegmentOffsets_ } { } @@ -117931,8 +120615,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeH265ProfileInfoKHR( StdVideoH265ProfileIdc stdProfileIdc_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdProfileIdc( stdProfileIdc_ ) + : pNext{ pNext_ } + , stdProfileIdc{ stdProfileIdc_ } { } @@ -118039,13 +120723,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdPPSCount_ = {}, const StdVideoH265PictureParameterSet * pStdPPSs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdVPSCount( stdVPSCount_ ) - , pStdVPSs( pStdVPSs_ ) - , stdSPSCount( stdSPSCount_ ) - , pStdSPSs( pStdSPSs_ ) - , stdPPSCount( stdPPSCount_ ) - , pStdPPSs( pStdPPSs_ ) + : pNext{ pNext_ } + , stdVPSCount{ stdVPSCount_ } + , pStdVPSs{ pStdVPSs_ } + , stdSPSCount{ stdSPSCount_ } + , pStdSPSs{ pStdSPSs_ } + , stdPPSCount{ stdPPSCount_ } + , pStdPPSs{ pStdPPSs_ } { } @@ -118234,11 +120918,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxStdPPSCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoDecodeH265SessionParametersAddInfoKHR * pParametersAddInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxStdVPSCount( maxStdVPSCount_ ) - , maxStdSPSCount( maxStdSPSCount_ ) - , maxStdPPSCount( maxStdPPSCount_ ) - , pParametersAddInfo( pParametersAddInfo_ ) + : pNext{ pNext_ } + , maxStdVPSCount{ maxStdVPSCount_ } + , maxStdSPSCount{ maxStdSPSCount_ } + , maxStdPPSCount{ maxStdPPSCount_ } + , pParametersAddInfo{ pParametersAddInfo_ } { } @@ -118370,15 +121054,15 @@ namespace VULKAN_HPP_NAMESPACE uint32_t referenceSlotCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoReferenceSlotInfoKHR * pReferenceSlots_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( 
pNext_ ) - , flags( flags_ ) - , srcBuffer( srcBuffer_ ) - , srcBufferOffset( srcBufferOffset_ ) - , srcBufferRange( srcBufferRange_ ) - , dstPictureResource( dstPictureResource_ ) - , pSetupReferenceSlot( pSetupReferenceSlot_ ) - , referenceSlotCount( referenceSlotCount_ ) - , pReferenceSlots( pReferenceSlots_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , srcBuffer{ srcBuffer_ } + , srcBufferOffset{ srcBufferOffset_ } + , srcBufferRange{ srcBufferRange_ } + , dstPictureResource{ dstPictureResource_ } + , pSetupReferenceSlot{ pSetupReferenceSlot_ } + , referenceSlotCount{ referenceSlotCount_ } + , pReferenceSlots{ pReferenceSlots_ } { } @@ -118568,8 +121252,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoDecodeUsageInfoKHR( VULKAN_HPP_NAMESPACE::VideoDecodeUsageFlagsKHR videoUsageHints_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoUsageHints( videoUsageHints_ ) + : pNext{ pNext_ } + , videoUsageHints{ videoUsageHints_ } { } @@ -118671,14 +121355,14 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent2D encodeInputPictureGranularity_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeFeedbackFlagsKHR supportedEncodeFeedbackFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , rateControlModes( rateControlModes_ ) - , maxRateControlLayers( maxRateControlLayers_ ) - , maxBitrate( maxBitrate_ ) - , maxQualityLevels( maxQualityLevels_ ) - , encodeInputPictureGranularity( encodeInputPictureGranularity_ ) - , supportedEncodeFeedbackFlags( supportedEncodeFeedbackFlags_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , rateControlModes{ rateControlModes_ } + , maxRateControlLayers{ maxRateControlLayers_ } + , maxBitrate{ maxBitrate_ } + , maxQualityLevels{ maxQualityLevels_ } + , encodeInputPictureGranularity{ encodeInputPictureGranularity_ } + , supportedEncodeFeedbackFlags{ supportedEncodeFeedbackFlags_ } { } @@ -118796,20 +121480,20 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 requiresGopRemainingFrames_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeH264StdFlagsKHR stdSyntaxFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , maxLevelIdc( maxLevelIdc_ ) - , maxSliceCount( maxSliceCount_ ) - , maxPPictureL0ReferenceCount( maxPPictureL0ReferenceCount_ ) - , maxBPictureL0ReferenceCount( maxBPictureL0ReferenceCount_ ) - , maxL1ReferenceCount( maxL1ReferenceCount_ ) - , maxTemporalLayerCount( maxTemporalLayerCount_ ) - , expectDyadicTemporalLayerPattern( expectDyadicTemporalLayerPattern_ ) - , minQp( minQp_ ) - , maxQp( maxQp_ ) - , prefersGopRemainingFrames( prefersGopRemainingFrames_ ) - , requiresGopRemainingFrames( requiresGopRemainingFrames_ ) - , stdSyntaxFlags( stdSyntaxFlags_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , maxLevelIdc{ maxLevelIdc_ } + , maxSliceCount{ maxSliceCount_ } + , maxPPictureL0ReferenceCount{ maxPPictureL0ReferenceCount_ } + , maxBPictureL0ReferenceCount{ maxBPictureL0ReferenceCount_ } + , maxL1ReferenceCount{ maxL1ReferenceCount_ } + , maxTemporalLayerCount{ maxTemporalLayerCount_ } + , expectDyadicTemporalLayerPattern{ expectDyadicTemporalLayerPattern_ } + , minQp{ minQp_ } + , maxQp{ maxQp_ } + , prefersGopRemainingFrames{ prefersGopRemainingFrames_ } + , requiresGopRemainingFrames{ requiresGopRemainingFrames_ } + , stdSyntaxFlags{ stdSyntaxFlags_ } { } @@ -118967,8 +121651,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) 
VULKAN_HPP_CONSTEXPR VideoEncodeH264DpbSlotInfoKHR( const StdVideoEncodeH264ReferenceInfo * pStdReferenceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdReferenceInfo( pStdReferenceInfo_ ) + : pNext{ pNext_ } + , pStdReferenceInfo{ pStdReferenceInfo_ } { } @@ -119061,9 +121745,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH264FrameSizeKHR( uint32_t frameISize_ = {}, uint32_t framePSize_ = {}, uint32_t frameBSize_ = {} ) VULKAN_HPP_NOEXCEPT - : frameISize( frameISize_ ) - , framePSize( framePSize_ ) - , frameBSize( frameBSize_ ) + : frameISize{ frameISize_ } + , framePSize{ framePSize_ } + , frameBSize{ frameBSize_ } { } @@ -119162,11 +121846,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t gopRemainingP_ = {}, uint32_t gopRemainingB_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useGopRemainingFrames( useGopRemainingFrames_ ) - , gopRemainingI( gopRemainingI_ ) - , gopRemainingP( gopRemainingP_ ) - , gopRemainingB( gopRemainingB_ ) + : pNext{ pNext_ } + , useGopRemainingFrames{ useGopRemainingFrames_ } + , gopRemainingI{ gopRemainingI_ } + , gopRemainingP{ gopRemainingP_ } + , gopRemainingB{ gopRemainingB_ } { } @@ -119291,9 +121975,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoEncodeH264NaluSliceInfoKHR( int32_t constantQp_ = {}, const StdVideoEncodeH264SliceHeader * pStdSliceHeader_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , constantQp( constantQp_ ) - , pStdSliceHeader( pStdSliceHeader_ ) + : pNext{ pNext_ } + , constantQp{ constantQp_ } + , pStdSliceHeader{ pStdSliceHeader_ } { } @@ -119399,11 +122083,11 @@ namespace VULKAN_HPP_NAMESPACE const StdVideoEncodeH264PictureInfo * pStdPictureInfo_ = {}, VULKAN_HPP_NAMESPACE::Bool32 generatePrefixNalu_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , naluSliceEntryCount( naluSliceEntryCount_ ) - , pNaluSliceEntries( pNaluSliceEntries_ ) - , pStdPictureInfo( pStdPictureInfo_ ) - , generatePrefixNalu( generatePrefixNalu_ ) + : pNext{ pNext_ } + , naluSliceEntryCount{ naluSliceEntryCount_ } + , pNaluSliceEntries{ pNaluSliceEntries_ } + , pStdPictureInfo{ pStdPictureInfo_ } + , generatePrefixNalu{ generatePrefixNalu_ } { } @@ -119551,8 +122235,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH264ProfileInfoKHR( StdVideoH264ProfileIdc stdProfileIdc_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdProfileIdc( stdProfileIdc_ ) + : pNext{ pNext_ } + , stdProfileIdc{ stdProfileIdc_ } { } @@ -119650,9 +122334,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH264QpKHR( int32_t qpI_ = {}, int32_t qpP_ = {}, int32_t qpB_ = {} ) VULKAN_HPP_NOEXCEPT - : qpI( qpI_ ) - , qpP( qpP_ ) - , qpB( qpB_ ) + : qpI{ qpI_ } + , qpP{ qpP_ } + , qpB{ qpB_ } { } @@ -119756,16 +122440,16 @@ namespace VULKAN_HPP_NAMESPACE uint32_t preferredMaxL1ReferenceCount_ = {}, VULKAN_HPP_NAMESPACE::Bool32 preferredStdEntropyCodingModeFlag_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , preferredRateControlFlags( preferredRateControlFlags_ ) - , preferredGopFrameCount( preferredGopFrameCount_ ) - , preferredIdrPeriod( preferredIdrPeriod_ ) - , preferredConsecutiveBFrameCount( preferredConsecutiveBFrameCount_ ) - , preferredTemporalLayerCount( 
preferredTemporalLayerCount_ ) - , preferredConstantQp( preferredConstantQp_ ) - , preferredMaxL0ReferenceCount( preferredMaxL0ReferenceCount_ ) - , preferredMaxL1ReferenceCount( preferredMaxL1ReferenceCount_ ) - , preferredStdEntropyCodingModeFlag( preferredStdEntropyCodingModeFlag_ ) + : pNext{ pNext_ } + , preferredRateControlFlags{ preferredRateControlFlags_ } + , preferredGopFrameCount{ preferredGopFrameCount_ } + , preferredIdrPeriod{ preferredIdrPeriod_ } + , preferredConsecutiveBFrameCount{ preferredConsecutiveBFrameCount_ } + , preferredTemporalLayerCount{ preferredTemporalLayerCount_ } + , preferredConstantQp{ preferredConstantQp_ } + , preferredMaxL0ReferenceCount{ preferredMaxL0ReferenceCount_ } + , preferredMaxL1ReferenceCount{ preferredMaxL1ReferenceCount_ } + , preferredStdEntropyCodingModeFlag{ preferredStdEntropyCodingModeFlag_ } { } @@ -119884,12 +122568,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t consecutiveBFrameCount_ = {}, uint32_t temporalLayerCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , gopFrameCount( gopFrameCount_ ) - , idrPeriod( idrPeriod_ ) - , consecutiveBFrameCount( consecutiveBFrameCount_ ) - , temporalLayerCount( temporalLayerCount_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , gopFrameCount{ gopFrameCount_ } + , idrPeriod{ idrPeriod_ } + , consecutiveBFrameCount{ consecutiveBFrameCount_ } + , temporalLayerCount{ temporalLayerCount_ } { } @@ -120025,13 +122709,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 useMaxFrameSize_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeH264FrameSizeKHR maxFrameSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useMinQp( useMinQp_ ) - , minQp( minQp_ ) - , useMaxQp( useMaxQp_ ) - , maxQp( maxQp_ ) - , useMaxFrameSize( useMaxFrameSize_ ) - , maxFrameSize( maxFrameSize_ ) + : pNext{ pNext_ } + , useMinQp{ useMinQp_ } + , minQp{ minQp_ } + , useMaxQp{ useMaxQp_ } + , maxQp{ maxQp_ } + , useMaxFrameSize{ useMaxFrameSize_ } + , maxFrameSize{ maxFrameSize_ } { } @@ -120172,9 +122856,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoEncodeH264SessionCreateInfoKHR( VULKAN_HPP_NAMESPACE::Bool32 useMaxLevelIdc_ = {}, StdVideoH264LevelIdc maxLevelIdc_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useMaxLevelIdc( useMaxLevelIdc_ ) - , maxLevelIdc( maxLevelIdc_ ) + : pNext{ pNext_ } + , useMaxLevelIdc{ useMaxLevelIdc_ } + , maxLevelIdc{ maxLevelIdc_ } { } @@ -120289,11 +122973,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdPPSCount_ = {}, const StdVideoH264PictureParameterSet * pStdPPSs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdSPSCount( stdSPSCount_ ) - , pStdSPSs( pStdSPSs_ ) - , stdPPSCount( stdPPSCount_ ) - , pStdPPSs( pStdPPSs_ ) + : pNext{ pNext_ } + , stdSPSCount{ stdSPSCount_ } + , pStdSPSs{ pStdSPSs_ } + , stdPPSCount{ stdPPSCount_ } + , pStdPPSs{ pStdPPSs_ } { } @@ -120452,10 +123136,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxStdPPSCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoEncodeH264SessionParametersAddInfoKHR * pParametersAddInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxStdSPSCount( maxStdSPSCount_ ) - , maxStdPPSCount( maxStdPPSCount_ ) - , pParametersAddInfo( pParametersAddInfo_ ) + : pNext{ pNext_ } + , maxStdSPSCount{ maxStdSPSCount_ } + , maxStdPPSCount{ maxStdPPSCount_ } + , pParametersAddInfo{ pParametersAddInfo_ } { } @@ -120573,9 +123257,9 @@ namespace 
VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoEncodeH264SessionParametersFeedbackInfoKHR( VULKAN_HPP_NAMESPACE::Bool32 hasStdSPSOverrides_ = {}, VULKAN_HPP_NAMESPACE::Bool32 hasStdPPSOverrides_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hasStdSPSOverrides( hasStdSPSOverrides_ ) - , hasStdPPSOverrides( hasStdPPSOverrides_ ) + : pNext{ pNext_ } + , hasStdSPSOverrides{ hasStdSPSOverrides_ } + , hasStdPPSOverrides{ hasStdPPSOverrides_ } { } @@ -120663,11 +123347,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdSPSId_ = {}, uint32_t stdPPSId_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , writeStdSPS( writeStdSPS_ ) - , writeStdPPS( writeStdPPS_ ) - , stdSPSId( stdSPSId_ ) - , stdPPSId( stdPPSId_ ) + : pNext{ pNext_ } + , writeStdSPS{ writeStdSPS_ } + , writeStdPPS{ writeStdPPS_ } + , stdSPSId{ stdSPSId_ } + , stdPPSId{ stdPPSId_ } { } @@ -120805,23 +123489,23 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 requiresGopRemainingFrames_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeH265StdFlagsKHR stdSyntaxFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , maxLevelIdc( maxLevelIdc_ ) - , maxSliceSegmentCount( maxSliceSegmentCount_ ) - , maxTiles( maxTiles_ ) - , ctbSizes( ctbSizes_ ) - , transformBlockSizes( transformBlockSizes_ ) - , maxPPictureL0ReferenceCount( maxPPictureL0ReferenceCount_ ) - , maxBPictureL0ReferenceCount( maxBPictureL0ReferenceCount_ ) - , maxL1ReferenceCount( maxL1ReferenceCount_ ) - , maxSubLayerCount( maxSubLayerCount_ ) - , expectDyadicTemporalSubLayerPattern( expectDyadicTemporalSubLayerPattern_ ) - , minQp( minQp_ ) - , maxQp( maxQp_ ) - , prefersGopRemainingFrames( prefersGopRemainingFrames_ ) - , requiresGopRemainingFrames( requiresGopRemainingFrames_ ) - , stdSyntaxFlags( stdSyntaxFlags_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , maxLevelIdc{ maxLevelIdc_ } + , maxSliceSegmentCount{ maxSliceSegmentCount_ } + , maxTiles{ maxTiles_ } + , ctbSizes{ ctbSizes_ } + , transformBlockSizes{ transformBlockSizes_ } + , maxPPictureL0ReferenceCount{ maxPPictureL0ReferenceCount_ } + , maxBPictureL0ReferenceCount{ maxBPictureL0ReferenceCount_ } + , maxL1ReferenceCount{ maxL1ReferenceCount_ } + , maxSubLayerCount{ maxSubLayerCount_ } + , expectDyadicTemporalSubLayerPattern{ expectDyadicTemporalSubLayerPattern_ } + , minQp{ minQp_ } + , maxQp{ maxQp_ } + , prefersGopRemainingFrames{ prefersGopRemainingFrames_ } + , requiresGopRemainingFrames{ requiresGopRemainingFrames_ } + , stdSyntaxFlags{ stdSyntaxFlags_ } { } @@ -120995,8 +123679,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH265DpbSlotInfoKHR( const StdVideoEncodeH265ReferenceInfo * pStdReferenceInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , pStdReferenceInfo( pStdReferenceInfo_ ) + : pNext{ pNext_ } + , pStdReferenceInfo{ pStdReferenceInfo_ } { } @@ -121089,9 +123773,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH265FrameSizeKHR( uint32_t frameISize_ = {}, uint32_t framePSize_ = {}, uint32_t frameBSize_ = {} ) VULKAN_HPP_NOEXCEPT - : frameISize( frameISize_ ) - , framePSize( framePSize_ ) - , frameBSize( frameBSize_ ) + : frameISize{ frameISize_ } + , framePSize{ framePSize_ } + , frameBSize{ frameBSize_ } { } @@ -121190,11 +123874,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t gopRemainingP_ = {}, uint32_t 
gopRemainingB_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useGopRemainingFrames( useGopRemainingFrames_ ) - , gopRemainingI( gopRemainingI_ ) - , gopRemainingP( gopRemainingP_ ) - , gopRemainingB( gopRemainingB_ ) + : pNext{ pNext_ } + , useGopRemainingFrames{ useGopRemainingFrames_ } + , gopRemainingI{ gopRemainingI_ } + , gopRemainingP{ gopRemainingP_ } + , gopRemainingB{ gopRemainingB_ } { } @@ -121319,9 +124003,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoEncodeH265NaluSliceSegmentInfoKHR( int32_t constantQp_ = {}, const StdVideoEncodeH265SliceSegmentHeader * pStdSliceSegmentHeader_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , constantQp( constantQp_ ) - , pStdSliceSegmentHeader( pStdSliceSegmentHeader_ ) + : pNext{ pNext_ } + , constantQp{ constantQp_ } + , pStdSliceSegmentHeader{ pStdSliceSegmentHeader_ } { } @@ -121427,10 +124111,10 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::VideoEncodeH265NaluSliceSegmentInfoKHR * pNaluSliceSegmentEntries_ = {}, const StdVideoEncodeH265PictureInfo * pStdPictureInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , naluSliceSegmentEntryCount( naluSliceSegmentEntryCount_ ) - , pNaluSliceSegmentEntries( pNaluSliceSegmentEntries_ ) - , pStdPictureInfo( pStdPictureInfo_ ) + : pNext{ pNext_ } + , naluSliceSegmentEntryCount{ naluSliceSegmentEntryCount_ } + , pNaluSliceSegmentEntries{ pNaluSliceSegmentEntries_ } + , pStdPictureInfo{ pStdPictureInfo_ } { } @@ -121569,8 +124253,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH265ProfileInfoKHR( StdVideoH265ProfileIdc stdProfileIdc_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdProfileIdc( stdProfileIdc_ ) + : pNext{ pNext_ } + , stdProfileIdc{ stdProfileIdc_ } { } @@ -121668,9 +124352,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeH265QpKHR( int32_t qpI_ = {}, int32_t qpP_ = {}, int32_t qpB_ = {} ) VULKAN_HPP_NOEXCEPT - : qpI( qpI_ ) - , qpP( qpP_ ) - , qpB( qpB_ ) + : qpI{ qpI_ } + , qpP{ qpP_ } + , qpB{ qpB_ } { } @@ -121773,15 +124457,15 @@ namespace VULKAN_HPP_NAMESPACE uint32_t preferredMaxL0ReferenceCount_ = {}, uint32_t preferredMaxL1ReferenceCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , preferredRateControlFlags( preferredRateControlFlags_ ) - , preferredGopFrameCount( preferredGopFrameCount_ ) - , preferredIdrPeriod( preferredIdrPeriod_ ) - , preferredConsecutiveBFrameCount( preferredConsecutiveBFrameCount_ ) - , preferredSubLayerCount( preferredSubLayerCount_ ) - , preferredConstantQp( preferredConstantQp_ ) - , preferredMaxL0ReferenceCount( preferredMaxL0ReferenceCount_ ) - , preferredMaxL1ReferenceCount( preferredMaxL1ReferenceCount_ ) + : pNext{ pNext_ } + , preferredRateControlFlags{ preferredRateControlFlags_ } + , preferredGopFrameCount{ preferredGopFrameCount_ } + , preferredIdrPeriod{ preferredIdrPeriod_ } + , preferredConsecutiveBFrameCount{ preferredConsecutiveBFrameCount_ } + , preferredSubLayerCount{ preferredSubLayerCount_ } + , preferredConstantQp{ preferredConstantQp_ } + , preferredMaxL0ReferenceCount{ preferredMaxL0ReferenceCount_ } + , preferredMaxL1ReferenceCount{ preferredMaxL1ReferenceCount_ } { } @@ -121896,12 +124580,12 @@ namespace VULKAN_HPP_NAMESPACE uint32_t consecutiveBFrameCount_ = {}, uint32_t subLayerCount_ = {}, 
const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , gopFrameCount( gopFrameCount_ ) - , idrPeriod( idrPeriod_ ) - , consecutiveBFrameCount( consecutiveBFrameCount_ ) - , subLayerCount( subLayerCount_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , gopFrameCount{ gopFrameCount_ } + , idrPeriod{ idrPeriod_ } + , consecutiveBFrameCount{ consecutiveBFrameCount_ } + , subLayerCount{ subLayerCount_ } { } @@ -122037,13 +124721,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 useMaxFrameSize_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeH265FrameSizeKHR maxFrameSize_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useMinQp( useMinQp_ ) - , minQp( minQp_ ) - , useMaxQp( useMaxQp_ ) - , maxQp( maxQp_ ) - , useMaxFrameSize( useMaxFrameSize_ ) - , maxFrameSize( maxFrameSize_ ) + : pNext{ pNext_ } + , useMinQp{ useMinQp_ } + , minQp{ minQp_ } + , useMaxQp{ useMaxQp_ } + , maxQp{ maxQp_ } + , useMaxFrameSize{ useMaxFrameSize_ } + , maxFrameSize{ maxFrameSize_ } { } @@ -122184,9 +124868,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoEncodeH265SessionCreateInfoKHR( VULKAN_HPP_NAMESPACE::Bool32 useMaxLevelIdc_ = {}, StdVideoH265LevelIdc maxLevelIdc_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , useMaxLevelIdc( useMaxLevelIdc_ ) - , maxLevelIdc( maxLevelIdc_ ) + : pNext{ pNext_ } + , useMaxLevelIdc{ useMaxLevelIdc_ } + , maxLevelIdc{ maxLevelIdc_ } { } @@ -122303,13 +124987,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdPPSCount_ = {}, const StdVideoH265PictureParameterSet * pStdPPSs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , stdVPSCount( stdVPSCount_ ) - , pStdVPSs( pStdVPSs_ ) - , stdSPSCount( stdSPSCount_ ) - , pStdSPSs( pStdSPSs_ ) - , stdPPSCount( stdPPSCount_ ) - , pStdPPSs( pStdPPSs_ ) + : pNext{ pNext_ } + , stdVPSCount{ stdVPSCount_ } + , pStdVPSs{ pStdVPSs_ } + , stdSPSCount{ stdSPSCount_ } + , pStdSPSs{ pStdSPSs_ } + , stdPPSCount{ stdPPSCount_ } + , pStdPPSs{ pStdPPSs_ } { } @@ -122498,11 +125182,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxStdPPSCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoEncodeH265SessionParametersAddInfoKHR * pParametersAddInfo_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , maxStdVPSCount( maxStdVPSCount_ ) - , maxStdSPSCount( maxStdSPSCount_ ) - , maxStdPPSCount( maxStdPPSCount_ ) - , pParametersAddInfo( pParametersAddInfo_ ) + : pNext{ pNext_ } + , maxStdVPSCount{ maxStdVPSCount_ } + , maxStdSPSCount{ maxStdSPSCount_ } + , maxStdPPSCount{ maxStdPPSCount_ } + , pParametersAddInfo{ pParametersAddInfo_ } { } @@ -122629,10 +125313,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Bool32 hasStdSPSOverrides_ = {}, VULKAN_HPP_NAMESPACE::Bool32 hasStdPPSOverrides_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hasStdVPSOverrides( hasStdVPSOverrides_ ) - , hasStdSPSOverrides( hasStdSPSOverrides_ ) - , hasStdPPSOverrides( hasStdPPSOverrides_ ) + : pNext{ pNext_ } + , hasStdVPSOverrides{ hasStdVPSOverrides_ } + , hasStdSPSOverrides{ hasStdSPSOverrides_ } + , hasStdPPSOverrides{ hasStdPPSOverrides_ } { } @@ -122727,13 +125411,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t stdSPSId_ = {}, uint32_t stdPPSId_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , writeStdVPS( writeStdVPS_ ) - , writeStdSPS( writeStdSPS_ ) - , writeStdPPS( writeStdPPS_ ) - , stdVPSId( stdVPSId_ ) - , stdSPSId( stdSPSId_ ) - , 
stdPPSId( stdPPSId_ ) + : pNext{ pNext_ } + , writeStdVPS{ writeStdVPS_ } + , writeStdSPS{ writeStdSPS_ } + , writeStdPPS{ writeStdPPS_ } + , stdVPSId{ stdVPSId_ } + , stdSPSId{ stdSPSId_ } + , stdPPSId{ stdPPSId_ } { } @@ -122880,16 +125564,16 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::VideoReferenceSlotInfoKHR * pReferenceSlots_ = {}, uint32_t precedingExternallyEncodedBytes_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , dstBuffer( dstBuffer_ ) - , dstBufferOffset( dstBufferOffset_ ) - , dstBufferRange( dstBufferRange_ ) - , srcPictureResource( srcPictureResource_ ) - , pSetupReferenceSlot( pSetupReferenceSlot_ ) - , referenceSlotCount( referenceSlotCount_ ) - , pReferenceSlots( pReferenceSlots_ ) - , precedingExternallyEncodedBytes( precedingExternallyEncodedBytes_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , dstBuffer{ dstBuffer_ } + , dstBufferOffset{ dstBufferOffset_ } + , dstBufferRange{ dstBufferRange_ } + , srcPictureResource{ srcPictureResource_ } + , pSetupReferenceSlot{ pSetupReferenceSlot_ } + , referenceSlotCount{ referenceSlotCount_ } + , pReferenceSlots{ pReferenceSlots_ } + , precedingExternallyEncodedBytes{ precedingExternallyEncodedBytes_ } { } @@ -123097,8 +125781,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeQualityLevelInfoKHR( uint32_t qualityLevel_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , qualityLevel( qualityLevel_ ) + : pNext{ pNext_ } + , qualityLevel{ qualityLevel_ } { } @@ -123196,9 +125880,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VideoEncodeRateControlModeFlagBitsKHR::eDefault, uint32_t preferredRateControlLayerCount_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , preferredRateControlMode( preferredRateControlMode_ ) - , preferredRateControlLayerCount( preferredRateControlLayerCount_ ) + : pNext{ pNext_ } + , preferredRateControlMode{ preferredRateControlMode_ } + , preferredRateControlLayerCount{ preferredRateControlLayerCount_ } { } @@ -123287,11 +125971,11 @@ namespace VULKAN_HPP_NAMESPACE uint32_t frameRateNumerator_ = {}, uint32_t frameRateDenominator_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , averageBitrate( averageBitrate_ ) - , maxBitrate( maxBitrate_ ) - , frameRateNumerator( frameRateNumerator_ ) - , frameRateDenominator( frameRateDenominator_ ) + : pNext{ pNext_ } + , averageBitrate{ averageBitrate_ } + , maxBitrate{ maxBitrate_ } + , frameRateNumerator{ frameRateNumerator_ } + , frameRateDenominator{ frameRateDenominator_ } { } @@ -123415,13 +126099,13 @@ namespace VULKAN_HPP_NAMESPACE uint32_t virtualBufferSizeInMs_ = {}, uint32_t initialVirtualBufferSizeInMs_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , rateControlMode( rateControlMode_ ) - , layerCount( layerCount_ ) - , pLayers( pLayers_ ) - , virtualBufferSizeInMs( virtualBufferSizeInMs_ ) - , initialVirtualBufferSizeInMs( initialVirtualBufferSizeInMs_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , rateControlMode{ rateControlMode_ } + , layerCount{ layerCount_ } + , pLayers{ pLayers_ } + , virtualBufferSizeInMs{ virtualBufferSizeInMs_ } + , initialVirtualBufferSizeInMs{ initialVirtualBufferSizeInMs_ } { } @@ -123592,8 +126276,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR 
VideoEncodeSessionParametersFeedbackInfoKHR( VULKAN_HPP_NAMESPACE::Bool32 hasOverrides_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , hasOverrides( hasOverrides_ ) + : pNext{ pNext_ } + , hasOverrides{ hasOverrides_ } { } @@ -123675,8 +126359,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEncodeSessionParametersGetInfoKHR( VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR videoSessionParameters_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoSessionParameters( videoSessionParameters_ ) + : pNext{ pNext_ } + , videoSessionParameters{ videoSessionParameters_ } { } @@ -123776,10 +126460,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VideoEncodeContentFlagsKHR videoContentHints_ = {}, VULKAN_HPP_NAMESPACE::VideoEncodeTuningModeKHR tuningMode_ = VULKAN_HPP_NAMESPACE::VideoEncodeTuningModeKHR::eDefault, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , videoUsageHints( videoUsageHints_ ) - , videoContentHints( videoContentHints_ ) - , tuningMode( tuningMode_ ) + : pNext{ pNext_ } + , videoUsageHints{ videoUsageHints_ } + , videoContentHints{ videoContentHints_ } + , tuningMode{ tuningMode_ } { } @@ -123894,8 +126578,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoEndCodingInfoKHR( VULKAN_HPP_NAMESPACE::VideoEndCodingFlagsKHR flags_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) + : pNext{ pNext_ } + , flags{ flags_ } { } @@ -123996,13 +126680,13 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::ImageTiling imageTiling_ = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal, VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsageFlags_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , format( format_ ) - , componentMapping( componentMapping_ ) - , imageCreateFlags( imageCreateFlags_ ) - , imageType( imageType_ ) - , imageTiling( imageTiling_ ) - , imageUsageFlags( imageUsageFlags_ ) + : pNext{ pNext_ } + , format{ format_ } + , componentMapping{ componentMapping_ } + , imageCreateFlags{ imageCreateFlags_ } + , imageType{ imageType_ } + , imageTiling{ imageTiling_ } + , imageUsageFlags{ imageUsageFlags_ } { } @@ -124100,10 +126784,10 @@ namespace VULKAN_HPP_NAMESPACE uint32_t firstQuery_ = {}, uint32_t queryCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queryPool( queryPool_ ) - , firstQuery( firstQuery_ ) - , queryCount( queryCount_ ) + : pNext{ pNext_ } + , queryPool{ queryPool_ } + , firstQuery{ firstQuery_ } + , queryCount{ queryCount_ } { } @@ -124215,9 +126899,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoProfileListInfoKHR( uint32_t profileCount_ = {}, const VULKAN_HPP_NAMESPACE::VideoProfileInfoKHR * pProfiles_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , profileCount( profileCount_ ) - , pProfiles( pProfiles_ ) + : pNext{ pNext_ } + , profileCount{ profileCount_ } + , pProfiles{ pProfiles_ } { } @@ -124346,16 +127030,16 @@ namespace VULKAN_HPP_NAMESPACE uint32_t maxActiveReferencePictures_ = {}, const VULKAN_HPP_NAMESPACE::ExtensionProperties * pStdHeaderVersion_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , queueFamilyIndex( queueFamilyIndex_ ) - , flags( flags_ ) - , pVideoProfile( pVideoProfile_ ) - , pictureFormat( pictureFormat_ ) - , maxCodedExtent( 
maxCodedExtent_ ) - , referencePictureFormat( referencePictureFormat_ ) - , maxDpbSlots( maxDpbSlots_ ) - , maxActiveReferencePictures( maxActiveReferencePictures_ ) - , pStdHeaderVersion( pStdHeaderVersion_ ) + : pNext{ pNext_ } + , queueFamilyIndex{ queueFamilyIndex_ } + , flags{ flags_ } + , pVideoProfile{ pVideoProfile_ } + , pictureFormat{ pictureFormat_ } + , maxCodedExtent{ maxCodedExtent_ } + , referencePictureFormat{ referencePictureFormat_ } + , maxDpbSlots{ maxDpbSlots_ } + , maxActiveReferencePictures{ maxActiveReferencePictures_ } + , pStdHeaderVersion{ pStdHeaderVersion_ } { } @@ -124532,9 +127216,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR VideoSessionMemoryRequirementsKHR( uint32_t memoryBindIndex_ = {}, VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , memoryBindIndex( memoryBindIndex_ ) - , memoryRequirements( memoryRequirements_ ) + : pNext{ pNext_ } + , memoryBindIndex{ memoryBindIndex_ } + , memoryRequirements{ memoryRequirements_ } { } @@ -124619,10 +127303,10 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR videoSessionParametersTemplate_ = {}, VULKAN_HPP_NAMESPACE::VideoSessionKHR videoSession_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , videoSessionParametersTemplate( videoSessionParametersTemplate_ ) - , videoSession( videoSession_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , videoSessionParametersTemplate{ videoSessionParametersTemplate_ } + , videoSession{ videoSession_ } { } @@ -124738,8 +127422,8 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR VideoSessionParametersUpdateInfoKHR( uint32_t updateSequenceCount_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , updateSequenceCount( updateSequenceCount_ ) + : pNext{ pNext_ } + , updateSequenceCount{ updateSequenceCount_ } { } @@ -124838,10 +127522,10 @@ namespace VULKAN_HPP_NAMESPACE struct wl_display * display_ = {}, struct wl_surface * surface_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , display( display_ ) - , surface( surface_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , display{ display_ } + , surface{ surface_ } { } @@ -124963,14 +127647,14 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::DeviceMemory * pReleaseSyncs_ = {}, const uint64_t * pReleaseKeys_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , acquireCount( acquireCount_ ) - , pAcquireSyncs( pAcquireSyncs_ ) - , pAcquireKeys( pAcquireKeys_ ) - , pAcquireTimeouts( pAcquireTimeouts_ ) - , releaseCount( releaseCount_ ) - , pReleaseSyncs( pReleaseSyncs_ ) - , pReleaseKeys( pReleaseKeys_ ) + : pNext{ pNext_ } + , acquireCount{ acquireCount_ } + , pAcquireSyncs{ pAcquireSyncs_ } + , pAcquireKeys{ pAcquireKeys_ } + , pAcquireTimeouts{ pAcquireTimeouts_ } + , releaseCount{ releaseCount_ } + , pReleaseSyncs{ pReleaseSyncs_ } + , pReleaseKeys{ pReleaseKeys_ } { } @@ -125228,14 +127912,14 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::DeviceMemory * pReleaseSyncs_ = {}, const uint64_t * pReleaseKeys_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , acquireCount( acquireCount_ ) - , pAcquireSyncs( pAcquireSyncs_ ) - , pAcquireKeys( pAcquireKeys_ ) - , pAcquireTimeoutMilliseconds( pAcquireTimeoutMilliseconds_ ) - , releaseCount( 
releaseCount_ ) - , pReleaseSyncs( pReleaseSyncs_ ) - , pReleaseKeys( pReleaseKeys_ ) + : pNext{ pNext_ } + , acquireCount{ acquireCount_ } + , pAcquireSyncs{ pAcquireSyncs_ } + , pAcquireKeys{ pAcquireKeys_ } + , pAcquireTimeoutMilliseconds{ pAcquireTimeoutMilliseconds_ } + , releaseCount{ releaseCount_ } + , pReleaseSyncs{ pReleaseSyncs_ } + , pReleaseKeys{ pReleaseKeys_ } { } @@ -125492,10 +128176,10 @@ namespace VULKAN_HPP_NAMESPACE HINSTANCE hinstance_ = {}, HWND hwnd_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , hinstance( hinstance_ ) - , hwnd( hwnd_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , hinstance{ hinstance_ } + , hwnd{ hwnd_ } { } @@ -125611,9 +128295,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureKHR( uint32_t accelerationStructureCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR * pAccelerationStructures_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructureCount( accelerationStructureCount_ ) - , pAccelerationStructures( pAccelerationStructures_ ) + : pNext{ pNext_ } + , accelerationStructureCount{ accelerationStructureCount_ } + , pAccelerationStructures{ pAccelerationStructures_ } { } @@ -125742,9 +128426,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureNV( uint32_t accelerationStructureCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV * pAccelerationStructures_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , accelerationStructureCount( accelerationStructureCount_ ) - , pAccelerationStructures( pAccelerationStructures_ ) + : pNext{ pNext_ } + , accelerationStructureCount{ accelerationStructureCount_ } + , pAccelerationStructures{ pAccelerationStructures_ } { } @@ -125872,9 +128556,9 @@ namespace VULKAN_HPP_NAMESPACE #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR WriteDescriptorSetInlineUniformBlock( uint32_t dataSize_ = {}, const void * pData_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , dataSize( dataSize_ ) - , pData( pData_ ) + : pNext{ pNext_ } + , dataSize{ dataSize_ } + , pData{ pData_ } { } @@ -126000,10 +128684,10 @@ namespace VULKAN_HPP_NAMESPACE xcb_connection_t * connection_ = {}, xcb_window_t window_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , connection( connection_ ) - , window( window_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , connection{ connection_ } + , window{ window_ } { } @@ -126132,10 +128816,10 @@ namespace VULKAN_HPP_NAMESPACE Display * dpy_ = {}, Window window_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext( pNext_ ) - , flags( flags_ ) - , dpy( dpy_ ) - , window( window_ ) + : pNext{ pNext_ } + , flags{ flags_ } + , dpy{ dpy_ } + , window{ window_ } { } diff --git a/third_party/vulkan/vulkan_to_string.hpp b/third_party/vulkan/vulkan_to_string.hpp index f32d156..8417c14 100644 --- a/third_party/vulkan/vulkan_to_string.hpp +++ b/third_party/vulkan/vulkan_to_string.hpp @@ -77,14 +77,14 @@ namespace VULKAN_HPP_NAMESPACE result += "CositedChromaSamples | "; if ( value & FormatFeatureFlagBits::eSampledImageFilterMinmax ) result += "SampledImageFilterMinmax | "; - if ( value & FormatFeatureFlagBits::eSampledImageFilterCubicEXT ) - result += "SampledImageFilterCubicEXT | "; if ( value & FormatFeatureFlagBits::eVideoDecodeOutputKHR ) result += 
"VideoDecodeOutputKHR | "; if ( value & FormatFeatureFlagBits::eVideoDecodeDpbKHR ) result += "VideoDecodeDpbKHR | "; if ( value & FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR ) result += "AccelerationStructureVertexBufferKHR | "; + if ( value & FormatFeatureFlagBits::eSampledImageFilterCubicEXT ) + result += "SampledImageFilterCubicEXT | "; if ( value & FormatFeatureFlagBits::eFragmentDensityMapEXT ) result += "FragmentDensityMapEXT | "; if ( value & FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR ) @@ -175,10 +175,10 @@ namespace VULKAN_HPP_NAMESPACE result += "VideoDecodeSrcKHR | "; if ( value & ImageUsageFlagBits::eVideoDecodeDpbKHR ) result += "VideoDecodeDpbKHR | "; - if ( value & ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR ) - result += "FragmentShadingRateAttachmentKHR | "; if ( value & ImageUsageFlagBits::eFragmentDensityMapEXT ) result += "FragmentDensityMapEXT | "; + if ( value & ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR ) + result += "FragmentShadingRateAttachmentKHR | "; if ( value & ImageUsageFlagBits::eHostTransferEXT ) result += "HostTransferEXT | "; if ( value & ImageUsageFlagBits::eVideoEncodeDstKHR ) @@ -368,16 +368,16 @@ namespace VULKAN_HPP_NAMESPACE result += "AccelerationStructureBuildKHR | "; if ( value & PipelineStageFlagBits::eRayTracingShaderKHR ) result += "RayTracingShaderKHR | "; + if ( value & PipelineStageFlagBits::eFragmentDensityProcessEXT ) + result += "FragmentDensityProcessEXT | "; if ( value & PipelineStageFlagBits::eFragmentShadingRateAttachmentKHR ) result += "FragmentShadingRateAttachmentKHR | "; + if ( value & PipelineStageFlagBits::eCommandPreprocessNV ) + result += "CommandPreprocessNV | "; if ( value & PipelineStageFlagBits::eTaskShaderEXT ) result += "TaskShaderEXT | "; if ( value & PipelineStageFlagBits::eMeshShaderEXT ) result += "MeshShaderEXT | "; - if ( value & PipelineStageFlagBits::eFragmentDensityProcessEXT ) - result += "FragmentDensityProcessEXT | "; - if ( value & PipelineStageFlagBits::eCommandPreprocessNV ) - result += "CommandPreprocessNV | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -966,10 +966,10 @@ namespace VULKAN_HPP_NAMESPACE result += "DescriptorBufferEXT | "; if ( value & DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT ) result += "EmbeddedImmutableSamplersEXT | "; - if ( value & DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT ) - result += "HostOnlyPoolEXT | "; if ( value & DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV ) result += "IndirectBindableNV | "; + if ( value & DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT ) + result += "HostOnlyPoolEXT | "; if ( value & DescriptorSetLayoutCreateFlagBits::ePerStageNV ) result += "PerStageNV | "; @@ -1030,10 +1030,10 @@ namespace VULKAN_HPP_NAMESPACE result += "AccelerationStructureReadKHR | "; if ( value & AccessFlagBits::eAccelerationStructureWriteKHR ) result += "AccelerationStructureWriteKHR | "; - if ( value & AccessFlagBits::eFragmentShadingRateAttachmentReadKHR ) - result += "FragmentShadingRateAttachmentReadKHR | "; if ( value & AccessFlagBits::eFragmentDensityMapReadEXT ) result += "FragmentDensityMapReadEXT | "; + if ( value & AccessFlagBits::eFragmentShadingRateAttachmentReadKHR ) + result += "FragmentShadingRateAttachmentReadKHR | "; if ( value & AccessFlagBits::eCommandPreprocessReadNV ) result += "CommandPreprocessReadNV | "; if ( value & AccessFlagBits::eCommandPreprocessWriteNV ) @@ -1747,10 +1747,10 @@ namespace VULKAN_HPP_NAMESPACE result += "Suspending | 
"; if ( value & RenderingFlagBits::eResuming ) result += "Resuming | "; - if ( value & RenderingFlagBits::eContentsInlineEXT ) - result += "ContentsInlineEXT | "; if ( value & RenderingFlagBits::eEnableLegacyDitheringEXT ) result += "EnableLegacyDitheringEXT | "; + if ( value & RenderingFlagBits::eContentsInlineKHR ) + result += "ContentsInlineKHR | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -2915,8 +2915,8 @@ namespace VULKAN_HPP_NAMESPACE std::string result; if ( value & VideoEncodeCapabilityFlagBitsKHR::ePrecedingExternallyEncodedBytes ) result += "PrecedingExternallyEncodedBytes | "; - if ( value & VideoEncodeCapabilityFlagBitsKHR::eInsufficientstreamBufferRangeDetectionBit ) - result += "InsufficientstreamBufferRangeDetectionBit | "; + if ( value & VideoEncodeCapabilityFlagBitsKHR::eInsufficientBitstreamBufferRangeDetection ) + result += "InsufficientBitstreamBufferRangeDetection | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -2927,12 +2927,12 @@ namespace VULKAN_HPP_NAMESPACE return "{}"; std::string result; - if ( value & VideoEncodeFeedbackFlagBitsKHR::estreamBufferOffsetBit ) - result += "streamBufferOffsetBit | "; - if ( value & VideoEncodeFeedbackFlagBitsKHR::estreamBytesWrittenBit ) - result += "streamBytesWrittenBit | "; - if ( value & VideoEncodeFeedbackFlagBitsKHR::estreamHasOverridesBit ) - result += "streamHasOverridesBit | "; + if ( value & VideoEncodeFeedbackFlagBitsKHR::eBitstreamBufferOffset ) + result += "BitstreamBufferOffset | "; + if ( value & VideoEncodeFeedbackFlagBitsKHR::eBitstreamBytesWritten ) + result += "BitstreamBytesWritten | "; + if ( value & VideoEncodeFeedbackFlagBitsKHR::eBitstreamHasOverrides ) + result += "BitstreamHasOverrides | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -3372,6 +3372,8 @@ namespace VULKAN_HPP_NAMESPACE result += "AllowDerivatives | "; if ( value & PipelineCreateFlagBits2KHR::eDerivative ) result += "Derivative | "; + if ( value & PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT ) + result += "EnableLegacyDitheringEXT | "; if ( value & PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex ) result += "ViewIndexFromDeviceIndex | "; if ( value & PipelineCreateFlagBits2KHR::eDispatchBase ) @@ -3428,6 +3430,8 @@ namespace VULKAN_HPP_NAMESPACE result += "RayTracingDisplacementMicromapNV | "; if ( value & PipelineCreateFlagBits2KHR::eDescriptorBufferEXT ) result += "DescriptorBufferEXT | "; + if ( value & PipelineCreateFlagBits2KHR::eCaptureData ) + result += "CaptureData | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -3592,6 +3596,8 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorInvalidVideoStdParametersKHR: return "ErrorInvalidVideoStdParametersKHR"; case Result::eErrorCompressionExhaustedEXT: return "ErrorCompressionExhaustedEXT"; case Result::eIncompatibleShaderBinaryEXT: return "IncompatibleShaderBinaryEXT"; + case Result::ePipelineBinaryMissingKHR: return "PipelineBinaryMissingKHR"; + case Result::eErrorNotEnoughSpaceKHR: return "ErrorNotEnoughSpaceKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -4091,13 +4097,11 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV: return "PipelineRepresentativeFragmentTestStateCreateInfoNV"; case StructureType::ePhysicalDeviceImageViewImageFormatInfoEXT: return "PhysicalDeviceImageViewImageFormatInfoEXT"; case StructureType::eFilterCubicImageViewImageFormatPropertiesEXT: return 
"FilterCubicImageViewImageFormatPropertiesEXT"; - case StructureType::eDeviceQueueGlobalPriorityCreateInfoKHR: return "DeviceQueueGlobalPriorityCreateInfoKHR"; case StructureType::eImportMemoryHostPointerInfoEXT: return "ImportMemoryHostPointerInfoEXT"; case StructureType::eMemoryHostPointerPropertiesEXT: return "MemoryHostPointerPropertiesEXT"; case StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT: return "PhysicalDeviceExternalMemoryHostPropertiesEXT"; case StructureType::ePhysicalDeviceShaderClockFeaturesKHR: return "PhysicalDeviceShaderClockFeaturesKHR"; case StructureType::ePipelineCompilerControlCreateInfoAMD: return "PipelineCompilerControlCreateInfoAMD"; - case StructureType::eCalibratedTimestampInfoKHR: return "CalibratedTimestampInfoKHR"; case StructureType::ePhysicalDeviceShaderCorePropertiesAMD: return "PhysicalDeviceShaderCorePropertiesAMD"; case StructureType::eVideoDecodeH265CapabilitiesKHR: return "VideoDecodeH265CapabilitiesKHR"; case StructureType::eVideoDecodeH265SessionParametersCreateInfoKHR: return "VideoDecodeH265SessionParametersCreateInfoKHR"; @@ -4105,19 +4109,16 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eVideoDecodeH265ProfileInfoKHR: return "VideoDecodeH265ProfileInfoKHR"; case StructureType::eVideoDecodeH265PictureInfoKHR: return "VideoDecodeH265PictureInfoKHR"; case StructureType::eVideoDecodeH265DpbSlotInfoKHR: return "VideoDecodeH265DpbSlotInfoKHR"; + case StructureType::eDeviceQueueGlobalPriorityCreateInfoKHR: return "DeviceQueueGlobalPriorityCreateInfoKHR"; case StructureType::ePhysicalDeviceGlobalPriorityQueryFeaturesKHR: return "PhysicalDeviceGlobalPriorityQueryFeaturesKHR"; case StructureType::eQueueFamilyGlobalPriorityPropertiesKHR: return "QueueFamilyGlobalPriorityPropertiesKHR"; case StructureType::eDeviceMemoryOverallocationCreateInfoAMD: return "DeviceMemoryOverallocationCreateInfoAMD"; case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT: return "PhysicalDeviceVertexAttributeDivisorPropertiesEXT"; - case StructureType::ePipelineVertexInputDivisorStateCreateInfoKHR: return "PipelineVertexInputDivisorStateCreateInfoKHR"; - case StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesKHR: return "PhysicalDeviceVertexAttributeDivisorFeaturesKHR"; #if defined( VK_USE_PLATFORM_GGP ) case StructureType::ePresentFrameTokenGGP: return "PresentFrameTokenGGP"; #endif /*VK_USE_PLATFORM_GGP*/ - case StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV: return "PhysicalDeviceComputeShaderDerivativesFeaturesNV"; case StructureType::ePhysicalDeviceMeshShaderFeaturesNV: return "PhysicalDeviceMeshShaderFeaturesNV"; case StructureType::ePhysicalDeviceMeshShaderPropertiesNV: return "PhysicalDeviceMeshShaderPropertiesNV"; - case StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesKHR: return "PhysicalDeviceFragmentShaderBarycentricFeaturesKHR"; case StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV: return "PhysicalDeviceShaderImageFootprintFeaturesNV"; case StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV: return "PipelineViewportExclusiveScissorStateCreateInfoNV"; case StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV: return "PhysicalDeviceExclusiveScissorFeaturesNV"; @@ -4180,11 +4181,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eSurfaceFullScreenExclusiveWin32InfoEXT: return "SurfaceFullScreenExclusiveWin32InfoEXT"; #endif /*VK_USE_PLATFORM_WIN32_KHR*/ case StructureType::eHeadlessSurfaceCreateInfoEXT: return "HeadlessSurfaceCreateInfoEXT"; - case 
StructureType::ePhysicalDeviceLineRasterizationFeaturesKHR: return "PhysicalDeviceLineRasterizationFeaturesKHR"; - case StructureType::ePipelineRasterizationLineStateCreateInfoKHR: return "PipelineRasterizationLineStateCreateInfoKHR"; - case StructureType::ePhysicalDeviceLineRasterizationPropertiesKHR: return "PhysicalDeviceLineRasterizationPropertiesKHR"; case StructureType::ePhysicalDeviceShaderAtomicFloatFeaturesEXT: return "PhysicalDeviceShaderAtomicFloatFeaturesEXT"; - case StructureType::ePhysicalDeviceIndexTypeUint8FeaturesKHR: return "PhysicalDeviceIndexTypeUint8FeaturesKHR"; case StructureType::ePhysicalDeviceExtendedDynamicStateFeaturesEXT: return "PhysicalDeviceExtendedDynamicStateFeaturesEXT"; case StructureType::ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR: return "PhysicalDevicePipelineExecutablePropertiesFeaturesKHR"; case StructureType::ePipelineInfoKHR: return "PipelineInfoKHR"; @@ -4301,6 +4298,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceGraphicsPipelineLibraryPropertiesEXT: return "PhysicalDeviceGraphicsPipelineLibraryPropertiesEXT"; case StructureType::eGraphicsPipelineLibraryCreateInfoEXT: return "GraphicsPipelineLibraryCreateInfoEXT"; case StructureType::ePhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD: return "PhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD"; + case StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesKHR: return "PhysicalDeviceFragmentShaderBarycentricFeaturesKHR"; case StructureType::ePhysicalDeviceFragmentShaderBarycentricPropertiesKHR: return "PhysicalDeviceFragmentShaderBarycentricPropertiesKHR"; case StructureType::ePhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR: return "PhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR"; case StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV: return "PhysicalDeviceFragmentShadingRateEnumsPropertiesNV"; @@ -4318,21 +4316,16 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR: return "PhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR"; case StructureType::ePhysicalDeviceImageCompressionControlFeaturesEXT: return "PhysicalDeviceImageCompressionControlFeaturesEXT"; case StructureType::eImageCompressionControlEXT: return "ImageCompressionControlEXT"; - case StructureType::eSubresourceLayout2KHR: return "SubresourceLayout2KHR"; - case StructureType::eImageSubresource2KHR: return "ImageSubresource2KHR"; case StructureType::eImageCompressionPropertiesEXT: return "ImageCompressionPropertiesEXT"; case StructureType::ePhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT: return "PhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT"; case StructureType::ePhysicalDevice4444FormatsFeaturesEXT: return "PhysicalDevice4444FormatsFeaturesEXT"; case StructureType::ePhysicalDeviceFaultFeaturesEXT: return "PhysicalDeviceFaultFeaturesEXT"; case StructureType::eDeviceFaultCountsEXT: return "DeviceFaultCountsEXT"; case StructureType::eDeviceFaultInfoEXT: return "DeviceFaultInfoEXT"; - case StructureType::ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT: return "PhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT"; case StructureType::ePhysicalDeviceRgba10X6FormatsFeaturesEXT: return "PhysicalDeviceRgba10X6FormatsFeaturesEXT"; #if defined( VK_USE_PLATFORM_DIRECTFB_EXT ) case StructureType::eDirectfbSurfaceCreateInfoEXT: return "DirectfbSurfaceCreateInfoEXT"; #endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ - case 
StructureType::ePhysicalDeviceMutableDescriptorTypeFeaturesEXT: return "PhysicalDeviceMutableDescriptorTypeFeaturesEXT"; - case StructureType::eMutableDescriptorTypeCreateInfoEXT: return "MutableDescriptorTypeCreateInfoEXT"; case StructureType::ePhysicalDeviceVertexInputDynamicStateFeaturesEXT: return "PhysicalDeviceVertexInputDynamicStateFeaturesEXT"; case StructureType::eVertexInputBindingDescription2EXT: return "VertexInputBindingDescription2EXT"; case StructureType::eVertexInputAttributeDescription2EXT: return "VertexInputAttributeDescription2EXT"; @@ -4456,6 +4449,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceShaderModuleIdentifierPropertiesEXT: return "PhysicalDeviceShaderModuleIdentifierPropertiesEXT"; case StructureType::ePipelineShaderStageModuleIdentifierCreateInfoEXT: return "PipelineShaderStageModuleIdentifierCreateInfoEXT"; case StructureType::eShaderModuleIdentifierEXT: return "ShaderModuleIdentifierEXT"; + case StructureType::ePhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT: return "PhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT"; case StructureType::ePhysicalDeviceOpticalFlowFeaturesNV: return "PhysicalDeviceOpticalFlowFeaturesNV"; case StructureType::ePhysicalDeviceOpticalFlowPropertiesNV: return "PhysicalDeviceOpticalFlowPropertiesNV"; case StructureType::eOpticalFlowImageFormatInfoNV: return "OpticalFlowImageFormatInfoNV"; @@ -4474,12 +4468,27 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceMaintenance5PropertiesKHR: return "PhysicalDeviceMaintenance5PropertiesKHR"; case StructureType::eRenderingAreaInfoKHR: return "RenderingAreaInfoKHR"; case StructureType::eDeviceImageSubresourceInfoKHR: return "DeviceImageSubresourceInfoKHR"; + case StructureType::eSubresourceLayout2KHR: return "SubresourceLayout2KHR"; + case StructureType::eImageSubresource2KHR: return "ImageSubresource2KHR"; case StructureType::ePipelineCreateFlags2CreateInfoKHR: return "PipelineCreateFlags2CreateInfoKHR"; case StructureType::eBufferUsageFlags2CreateInfoKHR: return "BufferUsageFlags2CreateInfoKHR"; + case StructureType::ePhysicalDeviceAntiLagFeaturesAMD: return "PhysicalDeviceAntiLagFeaturesAMD"; + case StructureType::eAntiLagDataAMD: return "AntiLagDataAMD"; + case StructureType::eAntiLagPresentationInfoAMD: return "AntiLagPresentationInfoAMD"; case StructureType::ePhysicalDeviceRayTracingPositionFetchFeaturesKHR: return "PhysicalDeviceRayTracingPositionFetchFeaturesKHR"; case StructureType::ePhysicalDeviceShaderObjectFeaturesEXT: return "PhysicalDeviceShaderObjectFeaturesEXT"; case StructureType::ePhysicalDeviceShaderObjectPropertiesEXT: return "PhysicalDeviceShaderObjectPropertiesEXT"; case StructureType::eShaderCreateInfoEXT: return "ShaderCreateInfoEXT"; + case StructureType::ePhysicalDevicePipelineBinaryFeaturesKHR: return "PhysicalDevicePipelineBinaryFeaturesKHR"; + case StructureType::ePipelineBinaryCreateInfoKHR: return "PipelineBinaryCreateInfoKHR"; + case StructureType::ePipelineBinaryInfoKHR: return "PipelineBinaryInfoKHR"; + case StructureType::ePipelineBinaryKeyKHR: return "PipelineBinaryKeyKHR"; + case StructureType::ePhysicalDevicePipelineBinaryPropertiesKHR: return "PhysicalDevicePipelineBinaryPropertiesKHR"; + case StructureType::eReleaseCapturedPipelineDataInfoKHR: return "ReleaseCapturedPipelineDataInfoKHR"; + case StructureType::ePipelineBinaryDataInfoKHR: return "PipelineBinaryDataInfoKHR"; + case StructureType::ePipelineCreateInfoKHR: return "PipelineCreateInfoKHR"; + case 
StructureType::eDevicePipelineBinaryInternalCacheControlKHR: return "DevicePipelineBinaryInternalCacheControlKHR"; + case StructureType::ePipelineBinaryHandlesInfoKHR: return "PipelineBinaryHandlesInfoKHR"; case StructureType::ePhysicalDeviceTilePropertiesFeaturesQCOM: return "PhysicalDeviceTilePropertiesFeaturesQCOM"; case StructureType::eTilePropertiesQCOM: return "TilePropertiesQCOM"; case StructureType::ePhysicalDeviceAmigoProfilingFeaturesSEC: return "PhysicalDeviceAmigoProfilingFeaturesSEC"; @@ -4489,6 +4498,10 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceRayTracingInvocationReorderPropertiesNV: return "PhysicalDeviceRayTracingInvocationReorderPropertiesNV"; case StructureType::ePhysicalDeviceExtendedSparseAddressSpaceFeaturesNV: return "PhysicalDeviceExtendedSparseAddressSpaceFeaturesNV"; case StructureType::ePhysicalDeviceExtendedSparseAddressSpacePropertiesNV: return "PhysicalDeviceExtendedSparseAddressSpacePropertiesNV"; + case StructureType::ePhysicalDeviceMutableDescriptorTypeFeaturesEXT: return "PhysicalDeviceMutableDescriptorTypeFeaturesEXT"; + case StructureType::eMutableDescriptorTypeCreateInfoEXT: return "MutableDescriptorTypeCreateInfoEXT"; + case StructureType::ePhysicalDeviceLegacyVertexAttributesFeaturesEXT: return "PhysicalDeviceLegacyVertexAttributesFeaturesEXT"; + case StructureType::ePhysicalDeviceLegacyVertexAttributesPropertiesEXT: return "PhysicalDeviceLegacyVertexAttributesPropertiesEXT"; case StructureType::eLayerSettingsCreateInfoEXT: return "LayerSettingsCreateInfoEXT"; case StructureType::ePhysicalDeviceShaderCoreBuiltinsFeaturesARM: return "PhysicalDeviceShaderCoreBuiltinsFeaturesARM"; case StructureType::ePhysicalDeviceShaderCoreBuiltinsPropertiesARM: return "PhysicalDeviceShaderCoreBuiltinsPropertiesARM"; @@ -4508,6 +4521,8 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceCooperativeMatrixPropertiesKHR: return "PhysicalDeviceCooperativeMatrixPropertiesKHR"; case StructureType::ePhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM: return "PhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM"; case StructureType::eMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM: return "MultiviewPerViewRenderAreasRenderPassBeginInfoQCOM"; + case StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesKHR: return "PhysicalDeviceComputeShaderDerivativesFeaturesKHR"; + case StructureType::ePhysicalDeviceComputeShaderDerivativesPropertiesKHR: return "PhysicalDeviceComputeShaderDerivativesPropertiesKHR"; case StructureType::eVideoDecodeAv1CapabilitiesKHR: return "VideoDecodeAv1CapabilitiesKHR"; case StructureType::eVideoDecodeAv1PictureInfoKHR: return "VideoDecodeAv1PictureInfoKHR"; case StructureType::eVideoDecodeAv1ProfileInfoKHR: return "VideoDecodeAv1ProfileInfoKHR"; @@ -4527,6 +4542,8 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceCubicClampFeaturesQCOM: return "PhysicalDeviceCubicClampFeaturesQCOM"; case StructureType::ePhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT: return "PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT"; case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesKHR: return "PhysicalDeviceVertexAttributeDivisorPropertiesKHR"; + case StructureType::ePipelineVertexInputDivisorStateCreateInfoKHR: return "PipelineVertexInputDivisorStateCreateInfoKHR"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesKHR: return "PhysicalDeviceVertexAttributeDivisorFeaturesKHR"; case StructureType::ePhysicalDeviceShaderFloatControls2FeaturesKHR: 
return "PhysicalDeviceShaderFloatControls2FeaturesKHR"; #if defined( VK_USE_PLATFORM_SCREEN_QNX ) case StructureType::eScreenBufferPropertiesQNX: return "ScreenBufferPropertiesQNX"; @@ -4536,6 +4553,11 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceExternalMemoryScreenBufferFeaturesQNX: return "PhysicalDeviceExternalMemoryScreenBufferFeaturesQNX"; #endif /*VK_USE_PLATFORM_SCREEN_QNX*/ case StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT: return "PhysicalDeviceLayeredDriverPropertiesMSFT"; + case StructureType::ePhysicalDeviceIndexTypeUint8FeaturesKHR: return "PhysicalDeviceIndexTypeUint8FeaturesKHR"; + case StructureType::ePhysicalDeviceLineRasterizationFeaturesKHR: return "PhysicalDeviceLineRasterizationFeaturesKHR"; + case StructureType::ePipelineRasterizationLineStateCreateInfoKHR: return "PipelineRasterizationLineStateCreateInfoKHR"; + case StructureType::ePhysicalDeviceLineRasterizationPropertiesKHR: return "PhysicalDeviceLineRasterizationPropertiesKHR"; + case StructureType::eCalibratedTimestampInfoKHR: return "CalibratedTimestampInfoKHR"; case StructureType::ePhysicalDeviceShaderExpectAssumeFeaturesKHR: return "PhysicalDeviceShaderExpectAssumeFeaturesKHR"; case StructureType::ePhysicalDeviceMaintenance6FeaturesKHR: return "PhysicalDeviceMaintenance6FeaturesKHR"; case StructureType::ePhysicalDeviceMaintenance6PropertiesKHR: return "PhysicalDeviceMaintenance6PropertiesKHR"; @@ -4548,8 +4570,19 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eBindDescriptorBufferEmbeddedSamplersInfoEXT: return "BindDescriptorBufferEmbeddedSamplersInfoEXT"; case StructureType::ePhysicalDeviceDescriptorPoolOverallocationFeaturesNV: return "PhysicalDeviceDescriptorPoolOverallocationFeaturesNV"; case StructureType::ePhysicalDeviceRawAccessChainsFeaturesNV: return "PhysicalDeviceRawAccessChainsFeaturesNV"; + case StructureType::ePhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR: return "PhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR"; + case StructureType::ePhysicalDeviceCommandBufferInheritanceFeaturesNV: return "PhysicalDeviceCommandBufferInheritanceFeaturesNV"; + case StructureType::ePhysicalDeviceMaintenance7FeaturesKHR: return "PhysicalDeviceMaintenance7FeaturesKHR"; + case StructureType::ePhysicalDeviceMaintenance7PropertiesKHR: return "PhysicalDeviceMaintenance7PropertiesKHR"; + case StructureType::ePhysicalDeviceLayeredApiPropertiesListKHR: return "PhysicalDeviceLayeredApiPropertiesListKHR"; + case StructureType::ePhysicalDeviceLayeredApiPropertiesKHR: return "PhysicalDeviceLayeredApiPropertiesKHR"; + case StructureType::ePhysicalDeviceLayeredApiVulkanPropertiesKHR: return "PhysicalDeviceLayeredApiVulkanPropertiesKHR"; case StructureType::ePhysicalDeviceShaderAtomicFloat16VectorFeaturesNV: return "PhysicalDeviceShaderAtomicFloat16VectorFeaturesNV"; + case StructureType::ePhysicalDeviceShaderReplicatedCompositesFeaturesEXT: return "PhysicalDeviceShaderReplicatedCompositesFeaturesEXT"; case StructureType::ePhysicalDeviceRayTracingValidationFeaturesNV: return "PhysicalDeviceRayTracingValidationFeaturesNV"; + case StructureType::ePhysicalDeviceImageAlignmentControlFeaturesMESA: return "PhysicalDeviceImageAlignmentControlFeaturesMESA"; + case StructureType::ePhysicalDeviceImageAlignmentControlPropertiesMESA: return "PhysicalDeviceImageAlignmentControlPropertiesMESA"; + case StructureType::eImageAlignmentControlCreateInfoMESA: return "ImageAlignmentControlCreateInfoMESA"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( 
value ) ) + " )"; } } @@ -4622,6 +4655,7 @@ namespace VULKAN_HPP_NAMESPACE case ObjectType::eMicromapEXT: return "MicromapEXT"; case ObjectType::eOpticalFlowSessionNV: return "OpticalFlowSessionNV"; case ObjectType::eShaderEXT: return "ShaderEXT"; + case ObjectType::ePipelineBinaryKHR: return "PipelineBinaryKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -4630,6 +4664,7 @@ namespace VULKAN_HPP_NAMESPACE { switch ( value ) { + case VendorId::eKhronos: return "Khronos"; case VendorId::eVIV: return "VIV"; case VendorId::eVSI: return "VSI"; case VendorId::eKazan: return "Kazan"; @@ -4892,7 +4927,7 @@ namespace VULKAN_HPP_NAMESPACE case Format::ePvrtc14BppSrgbBlockIMG: return "Pvrtc14BppSrgbBlockIMG"; case Format::ePvrtc22BppSrgbBlockIMG: return "Pvrtc22BppSrgbBlockIMG"; case Format::ePvrtc24BppSrgbBlockIMG: return "Pvrtc24BppSrgbBlockIMG"; - case Format::eR16G16S105NV: return "R16G16S105NV"; + case Format::eR16G16Sfixed5NV: return "R16G16Sfixed5NV"; case Format::eA1B5G5R5UnormPack16KHR: return "A1B5G5R5UnormPack16KHR"; case Format::eA8UnormKHR: return "A8UnormKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; @@ -4927,10 +4962,10 @@ namespace VULKAN_HPP_NAMESPACE case FormatFeatureFlagBits::eDisjoint: return "Disjoint"; case FormatFeatureFlagBits::eCositedChromaSamples: return "CositedChromaSamples"; case FormatFeatureFlagBits::eSampledImageFilterMinmax: return "SampledImageFilterMinmax"; - case FormatFeatureFlagBits::eSampledImageFilterCubicEXT: return "SampledImageFilterCubicEXT"; case FormatFeatureFlagBits::eVideoDecodeOutputKHR: return "VideoDecodeOutputKHR"; case FormatFeatureFlagBits::eVideoDecodeDpbKHR: return "VideoDecodeDpbKHR"; case FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR: return "AccelerationStructureVertexBufferKHR"; + case FormatFeatureFlagBits::eSampledImageFilterCubicEXT: return "SampledImageFilterCubicEXT"; case FormatFeatureFlagBits::eFragmentDensityMapEXT: return "FragmentDensityMapEXT"; case FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR: return "FragmentShadingRateAttachmentKHR"; case FormatFeatureFlagBits::eVideoEncodeInputKHR: return "VideoEncodeInputKHR"; @@ -5004,8 +5039,8 @@ namespace VULKAN_HPP_NAMESPACE case ImageUsageFlagBits::eVideoDecodeDstKHR: return "VideoDecodeDstKHR"; case ImageUsageFlagBits::eVideoDecodeSrcKHR: return "VideoDecodeSrcKHR"; case ImageUsageFlagBits::eVideoDecodeDpbKHR: return "VideoDecodeDpbKHR"; - case ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR: return "FragmentShadingRateAttachmentKHR"; case ImageUsageFlagBits::eFragmentDensityMapEXT: return "FragmentDensityMapEXT"; + case ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR: return "FragmentShadingRateAttachmentKHR"; case ImageUsageFlagBits::eHostTransferEXT: return "HostTransferEXT"; case ImageUsageFlagBits::eVideoEncodeDstKHR: return "VideoEncodeDstKHR"; case ImageUsageFlagBits::eVideoEncodeSrcKHR: return "VideoEncodeSrcKHR"; @@ -5160,11 +5195,11 @@ namespace VULKAN_HPP_NAMESPACE case PipelineStageFlagBits::eConditionalRenderingEXT: return "ConditionalRenderingEXT"; case PipelineStageFlagBits::eAccelerationStructureBuildKHR: return "AccelerationStructureBuildKHR"; case PipelineStageFlagBits::eRayTracingShaderKHR: return "RayTracingShaderKHR"; + case PipelineStageFlagBits::eFragmentDensityProcessEXT: return "FragmentDensityProcessEXT"; case PipelineStageFlagBits::eFragmentShadingRateAttachmentKHR: return 
"FragmentShadingRateAttachmentKHR"; + case PipelineStageFlagBits::eCommandPreprocessNV: return "CommandPreprocessNV"; case PipelineStageFlagBits::eTaskShaderEXT: return "TaskShaderEXT"; case PipelineStageFlagBits::eMeshShaderEXT: return "MeshShaderEXT"; - case PipelineStageFlagBits::eFragmentDensityProcessEXT: return "FragmentDensityProcessEXT"; - case PipelineStageFlagBits::eCommandPreprocessNV: return "CommandPreprocessNV"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -5398,8 +5433,8 @@ namespace VULKAN_HPP_NAMESPACE case ImageLayout::eVideoDecodeSrcKHR: return "VideoDecodeSrcKHR"; case ImageLayout::eVideoDecodeDpbKHR: return "VideoDecodeDpbKHR"; case ImageLayout::eSharedPresentKHR: return "SharedPresentKHR"; - case ImageLayout::eFragmentShadingRateAttachmentOptimalKHR: return "FragmentShadingRateAttachmentOptimalKHR"; case ImageLayout::eFragmentDensityMapOptimalEXT: return "FragmentDensityMapOptimalEXT"; + case ImageLayout::eFragmentShadingRateAttachmentOptimalKHR: return "FragmentShadingRateAttachmentOptimalKHR"; case ImageLayout::eRenderingLocalReadKHR: return "RenderingLocalReadKHR"; case ImageLayout::eVideoEncodeDstKHR: return "VideoEncodeDstKHR"; case ImageLayout::eVideoEncodeSrcKHR: return "VideoEncodeSrcKHR"; @@ -5629,7 +5664,6 @@ namespace VULKAN_HPP_NAMESPACE case DynamicState::eExclusiveScissorEnableNV: return "ExclusiveScissorEnableNV"; case DynamicState::eExclusiveScissorNV: return "ExclusiveScissorNV"; case DynamicState::eFragmentShadingRateKHR: return "FragmentShadingRateKHR"; - case DynamicState::eLineStippleKHR: return "LineStippleKHR"; case DynamicState::eVertexInputEXT: return "VertexInputEXT"; case DynamicState::ePatchControlPointsEXT: return "PatchControlPointsEXT"; case DynamicState::eLogicOpEXT: return "LogicOpEXT"; @@ -5666,6 +5700,7 @@ namespace VULKAN_HPP_NAMESPACE case DynamicState::eRepresentativeFragmentTestEnableNV: return "RepresentativeFragmentTestEnableNV"; case DynamicState::eCoverageReductionModeNV: return "CoverageReductionModeNV"; case DynamicState::eAttachmentFeedbackLoopEnableEXT: return "AttachmentFeedbackLoopEnableEXT"; + case DynamicState::eLineStippleKHR: return "LineStippleKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -5985,8 +6020,8 @@ namespace VULKAN_HPP_NAMESPACE case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR: return "PushDescriptorKHR"; case DescriptorSetLayoutCreateFlagBits::eDescriptorBufferEXT: return "DescriptorBufferEXT"; case DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT: return "EmbeddedImmutableSamplersEXT"; - case DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT: return "HostOnlyPoolEXT"; case DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV: return "IndirectBindableNV"; + case DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT: return "HostOnlyPoolEXT"; case DescriptorSetLayoutCreateFlagBits::ePerStageNV: return "PerStageNV"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } @@ -6010,9 +6045,9 @@ namespace VULKAN_HPP_NAMESPACE case DescriptorType::eInlineUniformBlock: return "InlineUniformBlock"; case DescriptorType::eAccelerationStructureKHR: return "AccelerationStructureKHR"; case DescriptorType::eAccelerationStructureNV: return "AccelerationStructureNV"; - case DescriptorType::eMutableEXT: return "MutableEXT"; case DescriptorType::eSampleWeightImageQCOM: return "SampleWeightImageQCOM"; case 
DescriptorType::eBlockMatchImageQCOM: return "BlockMatchImageQCOM"; + case DescriptorType::eMutableEXT: return "MutableEXT"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6051,8 +6086,8 @@ namespace VULKAN_HPP_NAMESPACE case AccessFlagBits::eColorAttachmentReadNoncoherentEXT: return "ColorAttachmentReadNoncoherentEXT"; case AccessFlagBits::eAccelerationStructureReadKHR: return "AccelerationStructureReadKHR"; case AccessFlagBits::eAccelerationStructureWriteKHR: return "AccelerationStructureWriteKHR"; - case AccessFlagBits::eFragmentShadingRateAttachmentReadKHR: return "FragmentShadingRateAttachmentReadKHR"; case AccessFlagBits::eFragmentDensityMapReadEXT: return "FragmentDensityMapReadEXT"; + case AccessFlagBits::eFragmentShadingRateAttachmentReadKHR: return "FragmentShadingRateAttachmentReadKHR"; case AccessFlagBits::eCommandPreprocessReadNV: return "CommandPreprocessReadNV"; case AccessFlagBits::eCommandPreprocessWriteNV: return "CommandPreprocessWriteNV"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; @@ -6240,7 +6275,7 @@ namespace VULKAN_HPP_NAMESPACE { case SubpassContents::eInline: return "Inline"; case SubpassContents::eSecondaryCommandBuffers: return "SecondaryCommandBuffers"; - case SubpassContents::eInlineAndSecondaryCommandBuffersEXT: return "InlineAndSecondaryCommandBuffersEXT"; + case SubpassContents::eInlineAndSecondaryCommandBuffersKHR: return "InlineAndSecondaryCommandBuffersKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6498,7 +6533,8 @@ namespace VULKAN_HPP_NAMESPACE case DriverId::eMesaDozen: return "MesaDozen"; case DriverId::eMesaNvk: return "MesaNvk"; case DriverId::eImaginationOpenSourceMESA: return "ImaginationOpenSourceMESA"; - case DriverId::eMesaAgxv: return "MesaAgxv"; + case DriverId::eMesaHoneykrisp: return "MesaHoneykrisp"; + case DriverId::eReserved27: return "Reserved27"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6723,8 +6759,8 @@ namespace VULKAN_HPP_NAMESPACE case RenderingFlagBits::eContentsSecondaryCommandBuffers: return "ContentsSecondaryCommandBuffers"; case RenderingFlagBits::eSuspending: return "Suspending"; case RenderingFlagBits::eResuming: return "Resuming"; - case RenderingFlagBits::eContentsInlineEXT: return "ContentsInlineEXT"; case RenderingFlagBits::eEnableLegacyDitheringEXT: return "EnableLegacyDitheringEXT"; + case RenderingFlagBits::eContentsInlineKHR: return "ContentsInlineKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -8247,7 +8283,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case VideoEncodeCapabilityFlagBitsKHR::ePrecedingExternallyEncodedBytes: return "PrecedingExternallyEncodedBytes"; - case VideoEncodeCapabilityFlagBitsKHR::eInsufficientstreamBufferRangeDetectionBit: return "InsufficientstreamBufferRangeDetectionBit"; + case VideoEncodeCapabilityFlagBitsKHR::eInsufficientBitstreamBufferRangeDetection: return "InsufficientBitstreamBufferRangeDetection"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -8256,9 +8292,9 @@ namespace VULKAN_HPP_NAMESPACE { switch ( value ) { - case VideoEncodeFeedbackFlagBitsKHR::estreamBufferOffsetBit: return "streamBufferOffsetBit"; - case VideoEncodeFeedbackFlagBitsKHR::estreamBytesWrittenBit: return "streamBytesWrittenBit"; - case 
VideoEncodeFeedbackFlagBitsKHR::estreamHasOverridesBit: return "streamHasOverridesBit"; + case VideoEncodeFeedbackFlagBitsKHR::eBitstreamBufferOffset: return "BitstreamBufferOffset"; + case VideoEncodeFeedbackFlagBitsKHR::eBitstreamBytesWritten: return "BitstreamBytesWritten"; + case VideoEncodeFeedbackFlagBitsKHR::eBitstreamHasOverrides: return "BitstreamHasOverrides"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -8813,6 +8849,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreateFlagBits2KHR::eDisableOptimization: return "DisableOptimization"; case PipelineCreateFlagBits2KHR::eAllowDerivatives: return "AllowDerivatives"; case PipelineCreateFlagBits2KHR::eDerivative: return "Derivative"; + case PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT: return "EnableLegacyDitheringEXT"; case PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex: return "ViewIndexFromDeviceIndex"; case PipelineCreateFlagBits2KHR::eDispatchBase: return "DispatchBase"; case PipelineCreateFlagBits2KHR::eDeferCompileNV: return "DeferCompileNV"; @@ -8841,6 +8878,7 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreateFlagBits2KHR::eProtectedAccessOnlyEXT: return "ProtectedAccessOnlyEXT"; case PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV: return "RayTracingDisplacementMicromapNV"; case PipelineCreateFlagBits2KHR::eDescriptorBufferEXT: return "DescriptorBufferEXT"; + case PipelineCreateFlagBits2KHR::eCaptureData: return "CaptureData"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -8881,6 +8919,29 @@ namespace VULKAN_HPP_NAMESPACE } } + //=== VK_AMD_anti_lag === + + VULKAN_HPP_INLINE std::string to_string( AntiLagModeAMD value ) + { + switch ( value ) + { + case AntiLagModeAMD::eDriverControl: return "DriverControl"; + case AntiLagModeAMD::eOn: return "On"; + case AntiLagModeAMD::eOff: return "Off"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( AntiLagStageAMD value ) + { + switch ( value ) + { + case AntiLagStageAMD::eInput: return "Input"; + case AntiLagStageAMD::ePresent: return "Present"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + //=== VK_EXT_shader_object === VULKAN_HPP_INLINE std::string to_string( ShaderCreateFlagBitsEXT value ) @@ -9069,5 +9130,20 @@ namespace VULKAN_HPP_NAMESPACE } } + //=== VK_KHR_maintenance7 === + + VULKAN_HPP_INLINE std::string to_string( PhysicalDeviceLayeredApiKHR value ) + { + switch ( value ) + { + case PhysicalDeviceLayeredApiKHR::eVulkan: return "Vulkan"; + case PhysicalDeviceLayeredApiKHR::eD3D12: return "D3D12"; + case PhysicalDeviceLayeredApiKHR::eMetal: return "Metal"; + case PhysicalDeviceLayeredApiKHR::eOpengl: return "Opengl"; + case PhysicalDeviceLayeredApiKHR::eOpengles: return "Opengles"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + } // namespace VULKAN_HPP_NAMESPACE #endif